diff --git a/application/__init__.py b/application/__init__.py
index 1bf50a5..01e6d4f 100644
--- a/application/__init__.py
+++ b/application/__init__.py
@@ -1,5 +1 @@
 from __future__ import absolute_import, unicode_literals
-# This will make sure celery is always imported when
-# Django starts so that shared_task will use this app.
-from .celeryapp import app as celery_app
-__all__ = ['celery_app']
\ No newline at end of file
diff --git a/application/admin.py b/application/admin.py
index 02ff7b2..6bef560 100644
--- a/application/admin.py
+++ b/application/admin.py
@@ -4,8 +4,7 @@ from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
 from django.contrib.auth.models import User
 
 from .models import Post, CustomUser
-from .models import ScheduledReport, ReportRecipient, ScheduledReportGroup
-from .forms import ScheduledReportForm
+
 
 
 class CustomUserInline(admin.StackedInline):
@@ -23,42 +22,3 @@ admin.site.register(User, UserAdmin)
 admin.site.register(Post)
-
-class ReportRecipientAdmin(admin.TabularInline):
-    model = ReportRecipient
-class ScheduledReportAdmin(admin.ModelAdmin):
-    """
-    List display for Scheduled reports in Django admin
-    """
-    model = ScheduledReport
-    list_display = ('id', 'get_recipients')
-    inlines = [
-        ReportRecipientAdmin
-    ]
-    form = ScheduledReportForm
-    def get_recipients(self, model):
-        recipients = model.reportrecep.all().values_list('email', flat=True)
-        if not recipients:
-            return 'No recipients added'
-        recipient_list = ''
-        for recipient in recipients:
-            recipient_list = recipient_list + recipient + ', '
-        return recipient_list[:-2]
-    get_recipients.short_description = 'Recipients'
-    get_recipients.allow_tags = True
-class ScheduledReportGroupAdmin(admin.ModelAdmin):
-    """
-    List display for ScheduledReportGroup Admin
-    """
-    model = ScheduledReportGroup
-    list_display = ('get_scheduled_report_name','get_report_name')
-    def get_scheduled_report_name(self, model):
-        return model.scheduled_report.subject
-    def get_report_name(self, model):
-        return model.report.name
-    get_scheduled_report_name.short_description = "Scheduled Report Name"
-    get_report_name.short_description = "Report Name"
-    show_change_link = True
-    get_report_name.allow_tags = True
-admin.site.register(ScheduledReport, ScheduledReportAdmin)
-admin.site.register(ScheduledReportGroup, ScheduledReportGroupAdmin)
\ No newline at end of file
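
Note: the get_recipients method removed above assembles its recipient string by repeated concatenation and a trailing slice. Should the admin ever be restored, an equivalent and more idiomatic form (a sketch only, reusing the removed reportrecep relation and assuming nothing else) would be:

    def get_recipients(self, obj):
        # Join the e-mail addresses directly instead of appending ', '
        # in a loop and slicing the trailing separator off.
        recipients = obj.reportrecep.all().values_list('email', flat=True)
        return ', '.join(recipients) or 'No recipients added'
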
diff --git a/application/celeryapp.py b/application/celeryapp.py
deleted file mode 100644
index 7848051..0000000
--- a/application/celeryapp.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import absolute_import
-import os
-from celery import Celery
-# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')
-from django.conf import settings
-
-app = Celery('application')
-# Using a string here means the worker don't have to serialize
-# the configuration object to child processes.
-app.config_from_object('django.conf:settings')
-# Load task modules from all registered Django app configs.
-app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
\ No newline at end of file
diff --git a/application/forms.py b/application/forms.py
index 188d7ce..724d9c5 100644
--- a/application/forms.py
+++ b/application/forms.py
@@ -9,7 +9,6 @@ from django.contrib.auth.forms import UserCreationForm, UserChangeForm
 from datetime import datetime
 from croniter import croniter
 from django.forms import ModelForm, ValidationError
-from .models import ScheduledReport
 
 class PostForm(forms.ModelForm):
     class Meta:
@@ -21,27 +20,3 @@ class NewTagForm(forms.ModelForm):
     class Meta:
         model = CustomUser
         fields = ['m_tags']
-
-
-class ScheduledReportForm(ModelForm):
-    class Meta:
-        model = ScheduledReport
-        fields = ['subject', 'cron_expression']
-        fields = ['subject', 'cron_expression']
-        help_texts = {'cron_expression': 'Scheduled time is considered in UTC'}
-    def clean(self):
-        cleaned_data = super(ScheduledReportForm, self).clean()
-        cron_expression = cleaned_data.get("cron_expression")
-        try:
-            iter = croniter(cron_expression, datetime.now())
-        except:
-            raise ValidationError("Incorrect cron expression:\
-                The information you must include is (in order of appearance):\
-                A number (or list of numbers, or range of numbers), m, representing the minute of the hour\
-                A number (or list of numbers, or range of numbers), h, representing the hour of the day\
-                A number (or list of numbers, or range of numbers), dom, representing the day of the month\
-                A number (or list, or range), or name (or list of names), mon, representing the month of the year\
-                A number (or list, or range), or name (or list of names), dow, representing the day of the week\
-                The asterisks (*) in our entry tell cron that for that unit of time, the job should be run every.\
-                Eg. */5 * * * * cron for executing every 5 mins")
-        return cleaned_data
\ No newline at end of file
diff --git a/application/migrations/0002_auto_20181030_1223.py b/application/migrations/0002_auto_20181030_1223.py
new file mode 100644
index 0000000..d6a5a7a
--- /dev/null
+++ b/application/migrations/0002_auto_20181030_1223.py
@@ -0,0 +1,37 @@
+# Generated by Django 2.1.2 on 2018-10-30 11:23
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('application', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='reportrecipient',
+            name='scheduled_report',
+        ),
+        migrations.RemoveField(
+            model_name='scheduledreportgroup',
+            name='report',
+        ),
+        migrations.RemoveField(
+            model_name='scheduledreportgroup',
+            name='scheduled_report',
+        ),
+        migrations.DeleteModel(
+            name='Report',
+        ),
+        migrations.DeleteModel(
+            name='ReportRecipient',
+        ),
+        migrations.DeleteModel(
+            name='ScheduledReport',
+        ),
+        migrations.DeleteModel(
+            name='ScheduledReportGroup',
+        ),
+    ]
diff --git a/application/models.py b/application/models.py
index 1f1738a..9c03b3a 100644
--- a/application/models.py
+++ b/application/models.py
@@ -26,40 +26,4 @@ class Post(models.Model):
         self.save()
 
     def __str__(self):
-        return self.title
-
-class Report(models.Model):
-    report_text = models.TextField()
-
-class ScheduledReport(models.Model):
-    """
-    Contains email subject and cron expression,to evaluate when the email has to be sent
-    """
-    subject = models.CharField(max_length=200)
-    last_run_at = models.DateTimeField(null=True, blank=True)
-    next_run_at = models.DateTimeField(null=True, blank=True)
-    cron_expression = models.CharField(max_length=200)
-    def save(self, *args, **kwargs):
-        """
-        function to evaluate "next_run_at" using
-        the cron expression, so that it is updated once the report is sent.
-        """
-        self.last_run_at = datetime.now()
-        iter = croniter(self.cron_expression, self.last_run_at)
-        self.next_run_at = iter.get_next(datetime)
-        super(ScheduledReport, self).save(*args, **kwargs)
-    def __unicode__(self):
-        return self.subject
-
-class ScheduledReportGroup(models.Model):
-    """
-    Many to many mapping between reports which will be sent out in a scheduled report
-    """
-    report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE)
-    scheduled_report = models.ForeignKey(ScheduledReport,
-                                         related_name='relatedscheduledreport', on_delete=models.CASCADE)
-class ReportRecipient(models.Model):
-    """
-    Stores all the recipients of the given scheduled report
-    """
-    email = models.EmailField()
-    scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE)
\ No newline at end of file
+        return self.title
\ No newline at end of file
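
Note: the ScheduledReport.save() override removed above computed next_run_at with croniter. A minimal standalone sketch of that evaluation (assuming only that croniter is installed; the expression below is a hypothetical example):

    from datetime import datetime
    from croniter import croniter

    cron_expression = '*/5 * * * *'  # hypothetical: every five minutes
    last_run_at = datetime.now()
    # get_next(datetime) yields the first run time strictly after last_run_at.
    next_run_at = croniter(cron_expression, last_run_at).get_next(datetime)
    print(next_run_at)
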
diff --git a/application/tasks.py b/application/tasks.py
deleted file mode 100644
index ccc572f..0000000
--- a/application/tasks.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from celery.task.schedules import crontab
-from celery.decorators import periodic_task
-from .email_service import send_emails
-# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab
-@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*"))
-def trigger_emails():
-    send_emails()
\ No newline at end of file
diff --git a/log.txt b/log.txt
index 29bb3f4..21125b0 100644
--- a/log.txt
+++ b/log.txt
@@ -400,3 +400,7 @@
 [24/Oct/2018 19:03:28] INFO [mysite:191] ]>
 [24/Oct/2018 19:03:45] INFO [mysite:189] bamberg
 [24/Oct/2018 19:03:45] INFO [mysite:191] , ]>
+[30/Oct/2018 12:25:09] INFO [mysite:56] ]>
+[30/Oct/2018 12:25:11] INFO [mysite:56] , ]>
+[30/Oct/2018 12:25:26] INFO [mysite:189] None
+[30/Oct/2018 12:25:34] INFO [mysite:189] bayern
diff --git a/mysite/settings.py b/mysite/settings.py
index dc0e8b4..e73d42b 100644
--- a/mysite/settings.py
+++ b/mysite/settings.py
@@ -13,7 +13,6 @@ https://docs.djangoproject.com/en/2.0/ref/settings/
 import os
 import re
 import socket
-import djcelery
 
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -47,7 +46,6 @@ INSTALLED_APPS = [
     'application',
     'taggit',
     'taggit_templatetags2',
-    'djcelery',
     'kombu.transport.django',
 ]
@@ -253,12 +251,3 @@ if DEBUG:
     DEBUG_TOOLBAR_CONFIG = {
         'INTERCEPT_REDIRECTS': False,
     }
-
-# Celery settings
-BROKER_URL = 'django://'
-CELERY_ACCEPT_CONTENT = ['json']
-CELERY_TASK_SERIALIZER = 'json'
-CELERY_RESULT_SERIALIZER = 'json'
-CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
-CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
-djcelery.setup_loader()
\ No newline at end of file
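
Note: with djcelery and its beat scheduler gone, the per-minute trigger_emails task deleted above has no replacement in this diff. If the mails are still wanted, one option is a management command driven by system cron. This is a sketch that assumes application/email_service.py and its send_emails() outlive this change; the command module name is hypothetical:

    # application/management/commands/send_scheduled_reports.py
    from django.core.management.base import BaseCommand

    from application.email_service import send_emails  # assumed to still exist


    class Command(BaseCommand):
        help = 'Send pending report mails; replaces the deleted Celery beat task.'

        def handle(self, *args, **options):
            send_emails()

Run from the system crontab, e.g. * * * * * python manage.py send_scheduled_reports, this matches the every-minute schedule of the deleted task.
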
[Truncated: the remainder of the diff deletes the vendored Celery 3.1.26 ('Cipater') sources from the checked-in virtualenv, unchanged from upstream. Files removed under thesisenv/lib/python3.6/site-packages/celery/ include __init__.py, __main__.py, _state.py, app/__init__.py, app/amqp.py, app/annotations.py, app/base.py, and further modules.]
Please use :attr:`GroupResult` instead.""" - return self.subclass_with_self('celery.result:TaskSetResult') - - @property - def pool(self): - if self._pool is None: - _ensure_after_fork() - limit = self.conf.BROKER_POOL_LIMIT - self._pool = self.connection().Pool(limit=limit) - return self._pool - - @property - def current_task(self): - return _task_stack.top - - @cached_property - def oid(self): - return oid_from(self) - - @cached_property - def amqp(self): - return instantiate(self.amqp_cls, app=self) - - @cached_property - def backend(self): - return self._get_backend() - - @cached_property - def conf(self): - return self._get_config() - - @cached_property - def control(self): - return instantiate(self.control_cls, app=self) - - @cached_property - def events(self): - return instantiate(self.events_cls, app=self) - - @cached_property - def loader(self): - return get_loader_cls(self.loader_cls)(app=self) - - @cached_property - def log(self): - return instantiate(self.log_cls, app=self) - - @cached_property - def canvas(self): - from celery import canvas - return canvas - - @cached_property - def tasks(self): - self.finalize(auto=True) - return self._tasks - - @cached_property - def timezone(self): - from celery.utils.timeutils import timezone - conf = self.conf - tz = conf.CELERY_TIMEZONE - if not tz: - return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC - else timezone.local) - return timezone.get_timezone(self.conf.CELERY_TIMEZONE) -App = Celery # compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py b/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py deleted file mode 100644 index 1502768..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.builtins - ~~~~~~~~~~~~~~~~~~~ - - Built-in tasks that are always available in all - app instances. E.g. chord, group and xmap. - -""" -from __future__ import absolute_import - -from collections import deque - -from celery._state import get_current_worker_task, connect_on_app_finalize -from celery.utils import uuid -from celery.utils.log import get_logger - -__all__ = [] - -logger = get_logger(__name__) - - -@connect_on_app_finalize -def add_backend_cleanup_task(app): - """The backend cleanup task can be used to clean up the default result - backend. - - If the configured backend requires periodic cleanup this task is also - automatically configured to run every day at 4am (requires - :program:`celery beat` to be running). - - """ - @app.task(name='celery.backend_cleanup', - shared=False, _force_evaluate=True) - def backend_cleanup(): - app.backend.cleanup() - return backend_cleanup - - -@connect_on_app_finalize -def add_unlock_chord_task(app): - """This task is used by result backends without native chord support. - - It joins chords by creating a task chain polling the header for completion. 
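send_task(), whose removal appears above, publishes a task message by name,
so the caller does not need the task function importable. A sketch; the
broker/backend URLs and the task name are purely illustrative:

    from celery import Celery

    app = Celery('application',
                 broker='amqp://guest@localhost//',
                 backend='redis://localhost:6379/0')

    # Publish by name; send_task() routes the message, publishes it,
    # and returns an AsyncResult bound to the generated task id.
    result = app.send_task('application.tasks.add', args=(2, 2))

    # Blocks until a worker stores the return value in the backend.
    print(result.get(timeout=10))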
- - """ - from celery.canvas import signature - from celery.exceptions import ChordError - from celery.result import allow_join_result, result_from_tuple - - default_propagate = app.conf.CELERY_CHORD_PROPAGATES - - @app.task(name='celery.chord_unlock', max_retries=None, shared=False, - default_retry_delay=1, ignore_result=True, _force_evaluate=True, - bind=True) - def unlock_chord(self, group_id, callback, interval=None, propagate=None, - max_retries=None, result=None, - Result=app.AsyncResult, GroupResult=app.GroupResult, - result_from_tuple=result_from_tuple): - # if propagate is disabled exceptions raised by chord tasks - # will be sent as part of the result list to the chord callback. - # Since 3.1 propagate will be enabled by default, and instead - # the chord callback changes state to FAILURE with the - # exception set to ChordError. - propagate = default_propagate if propagate is None else propagate - if interval is None: - interval = self.default_retry_delay - - # check if the task group is ready, and if so apply the callback. - deps = GroupResult( - group_id, - [result_from_tuple(r, app=app) for r in result], - app=app, - ) - j = deps.join_native if deps.supports_native_join else deps.join - - try: - ready = deps.ready() - except Exception as exc: - raise self.retry( - exc=exc, countdown=interval, max_retries=max_retries, - ) - else: - if not ready: - raise self.retry(countdown=interval, max_retries=max_retries) - - callback = signature(callback, app=app) - try: - with allow_join_result(): - ret = j(timeout=3.0, propagate=propagate) - except Exception as exc: - try: - culprit = next(deps._failed_join_report()) - reason = 'Dependency {0.id} raised {1!r}'.format( - culprit, exc, - ) - except StopIteration: - reason = repr(exc) - logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) - app.backend.chord_error_from_stack(callback, - ChordError(reason)) - else: - try: - callback.delay(ret) - except Exception as exc: - logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) - app.backend.chord_error_from_stack( - callback, - exc=ChordError('Callback error: {0!r}'.format(exc)), - ) - return unlock_chord - - -@connect_on_app_finalize -def add_map_task(app): - from celery.canvas import signature - - @app.task(name='celery.map', shared=False, _force_evaluate=True) - def xmap(task, it): - task = signature(task, app=app).type - return [task(item) for item in it] - return xmap - - -@connect_on_app_finalize -def add_starmap_task(app): - from celery.canvas import signature - - @app.task(name='celery.starmap', shared=False, _force_evaluate=True) - def xstarmap(task, it): - task = signature(task, app=app).type - return [task(*item) for item in it] - return xstarmap - - -@connect_on_app_finalize -def add_chunk_task(app): - from celery.canvas import chunks as _chunks - - @app.task(name='celery.chunks', shared=False, _force_evaluate=True) - def chunks(task, it, n): - return _chunks.apply_chunks(task, it, n) - return chunks - - -@connect_on_app_finalize -def add_group_task(app): - _app = app - from celery.canvas import maybe_signature, signature - from celery.result import result_from_tuple - - class Group(app.Task): - app = _app - name = 'celery.group' - accept_magic_kwargs = False - _decorated = True - - def run(self, tasks, result, group_id, partial_args, - add_to_parent=True): - app = self.app - result = result_from_tuple(result, app) - # any partial args are added to all tasks in the group - taskit = (signature(task, app=app).clone(partial_args) - for i, task in enumerate(tasks)) - 
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER: - return app.GroupResult( - result.id, - [stask.apply(group_id=group_id) for stask in taskit], - ) - with app.producer_or_acquire() as pub: - [stask.apply_async(group_id=group_id, producer=pub, - add_to_parent=False) for stask in taskit] - parent = get_current_worker_task() - if add_to_parent and parent: - parent.add_trail(result) - return result - - def prepare(self, options, tasks, args, **kwargs): - options['group_id'] = group_id = ( - options.setdefault('task_id', uuid())) - - def prepare_member(task): - task = maybe_signature(task, app=self.app) - task.options['group_id'] = group_id - return task, task.freeze() - - try: - tasks, res = list(zip( - *[prepare_member(task) for task in tasks] - )) - except ValueError: # tasks empty - tasks, res = [], [] - return (tasks, self.app.GroupResult(group_id, res), group_id, args) - - def apply_async(self, partial_args=(), kwargs={}, **options): - if self.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(partial_args, kwargs, **options) - tasks, result, gid, args = self.prepare( - options, args=partial_args, **kwargs - ) - super(Group, self).apply_async(( - list(tasks), result.as_tuple(), gid, args), **options - ) - return result - - def apply(self, args=(), kwargs={}, **options): - return super(Group, self).apply( - self.prepare(options, args=args, **kwargs), - **options).get() - return Group - - -@connect_on_app_finalize -def add_chain_task(app): - from celery.canvas import ( - Signature, chain, chord, group, maybe_signature, maybe_unroll_group, - ) - - _app = app - - class Chain(app.Task): - app = _app - name = 'celery.chain' - accept_magic_kwargs = False - _decorated = True - - def prepare_steps(self, args, tasks): - app = self.app - steps = deque(tasks) - next_step = prev_task = prev_res = None - tasks, results = [], [] - i = 0 - while steps: - # First task get partial args from chain. - task = maybe_signature(steps.popleft(), app=app) - task = task.clone() if i else task.clone(args) - res = task.freeze() - i += 1 - - if isinstance(task, group): - task = maybe_unroll_group(task) - if isinstance(task, chain): - # splice the chain - steps.extendleft(reversed(task.tasks)) - continue - - elif isinstance(task, group) and steps and \ - not isinstance(steps[0], group): - # automatically upgrade group(..) | s to chord(group, s) - try: - next_step = steps.popleft() - # for chords we freeze by pretending it's a normal - # task instead of a group. - res = Signature.freeze(next_step) - task = chord(task, body=next_step, task_id=res.task_id) - except IndexError: - pass # no callback, so keep as group - if prev_task: - # link previous task to this task. - prev_task.link(task) - # set the results parent attribute. - if not res.parent: - res.parent = prev_res - - if not isinstance(prev_task, chord): - results.append(res) - tasks.append(task) - prev_task, prev_res = task, res - - return tasks, results - - def apply_async(self, args=(), kwargs={}, group_id=None, chord=None, - task_id=None, link=None, link_error=None, **options): - if self.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, **options) - options.pop('publisher', None) - tasks, results = self.prepare_steps(args, kwargs['tasks']) - result = results[-1] - if group_id: - tasks[-1].set(group_id=group_id) - if chord: - tasks[-1].set(chord=chord) - if task_id: - tasks[-1].set(task_id=task_id) - result = tasks[-1].type.AsyncResult(task_id) - # make sure we can do a link() and link_error() on a chain object. 
- if link: - tasks[-1].set(link=link) - # and if any task in the chain fails, call the errbacks - if link_error: - for task in tasks: - task.set(link_error=link_error) - tasks[0].apply_async(**options) - return result - - def apply(self, args=(), kwargs={}, signature=maybe_signature, - **options): - app = self.app - last, fargs = None, args # fargs passed to first task only - for task in kwargs['tasks']: - res = signature(task, app=app).clone(fargs).apply( - last and (last.get(), ), - ) - res.parent, last, fargs = last, res, None - return last - return Chain - - -@connect_on_app_finalize -def add_chord_task(app): - """Every chord is executed in a dedicated task, so that the chord - can be used as a signature, and this generates the task - responsible for that.""" - from celery import group - from celery.canvas import maybe_signature - _app = app - default_propagate = app.conf.CELERY_CHORD_PROPAGATES - - class Chord(app.Task): - app = _app - name = 'celery.chord' - accept_magic_kwargs = False - ignore_result = False - _decorated = True - - def run(self, header, body, partial_args=(), interval=None, - countdown=1, max_retries=None, propagate=None, - eager=False, **kwargs): - app = self.app - propagate = default_propagate if propagate is None else propagate - group_id = uuid() - - # - convert back to group if serialized - tasks = header.tasks if isinstance(header, group) else header - header = group([ - maybe_signature(s, app=app).clone() for s in tasks - ], app=self.app) - # - eager applies the group inline - if eager: - return header.apply(args=partial_args, task_id=group_id) - - body['chord_size'] = len(header.tasks) - results = header.freeze(group_id=group_id, chord=body).results - - return self.backend.apply_chord( - header, partial_args, group_id, - body, interval=interval, countdown=countdown, - max_retries=max_retries, propagate=propagate, result=results, - ) - - def apply_async(self, args=(), kwargs={}, task_id=None, - group_id=None, chord=None, **options): - app = self.app - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, **options) - header = kwargs.pop('header') - body = kwargs.pop('body') - header, body = (maybe_signature(header, app=app), - maybe_signature(body, app=app)) - # forward certain options to body - if chord is not None: - body.options['chord'] = chord - if group_id is not None: - body.options['group_id'] = group_id - [body.link(s) for s in options.pop('link', [])] - [body.link_error(s) for s in options.pop('link_error', [])] - body_result = body.freeze(task_id) - parent = super(Chord, self).apply_async((header, body, args), - kwargs, **options) - body_result.parent = parent - return body_result - - def apply(self, args=(), kwargs={}, propagate=True, **options): - body = kwargs['body'] - res = super(Chord, self).apply(args, dict(kwargs, eager=True), - **options) - return maybe_signature(body, app=self.app).apply( - args=(res.get(propagate=propagate).get(), )) - return Chord diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/control.py b/thesisenv/lib/python3.6/site-packages/celery/app/control.py deleted file mode 100644 index 7258dd6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/control.py +++ /dev/null @@ -1,317 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.control - ~~~~~~~~~~~~~~~~~~~ - - Client for worker remote control commands. - Server implementation is in :mod:`celery.worker.control`. 
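The group/chain/chord tasks deleted above are what the canvas primitives
compile to at execution time. A usage sketch, assuming celery 3.1 and
defining throwaway example tasks:

    from celery import Celery, chain, chord, group

    app = Celery('application',
                 broker='amqp://guest@localhost//',
                 backend='redis://localhost:6379/0')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # chain pipes each result into the next signature.
    pipeline = chain(add.s(2, 2), add.s(4), add.s(8))

    # chord runs the header group in parallel, then feeds the list of
    # results to the body; backends without native join support poll
    # readiness via the celery.chord_unlock task shown above.
    workflow = chord(group(add.s(i, i) for i in range(10)), tsum.s())
    result = workflow.apply_async()
    print(result.get(timeout=10))  # 90, once a worker has run it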
- -""" -from __future__ import absolute_import - -import warnings - -from kombu.pidbox import Mailbox -from kombu.utils import cached_property - -from celery.exceptions import DuplicateNodenameWarning -from celery.utils.text import pluralize - -__all__ = ['Inspect', 'Control', 'flatten_reply'] - -W_DUPNODE = """\ -Received multiple replies from node {0}: {1}. -Please make sure you give each node a unique nodename using the `-n` option.\ -""" - - -def flatten_reply(reply): - nodes, dupes = {}, set() - for item in reply: - [dupes.add(name) for name in item if name in nodes] - nodes.update(item) - if dupes: - warnings.warn(DuplicateNodenameWarning( - W_DUPNODE.format( - pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)), - ), - )) - return nodes - - -class Inspect(object): - app = None - - def __init__(self, destination=None, timeout=1, callback=None, - connection=None, app=None, limit=None): - self.app = app or self.app - self.destination = destination - self.timeout = timeout - self.callback = callback - self.connection = connection - self.limit = limit - - def _prepare(self, reply): - if not reply: - return - by_node = flatten_reply(reply) - if self.destination and \ - not isinstance(self.destination, (list, tuple)): - return by_node.get(self.destination) - return by_node - - def _request(self, command, **kwargs): - return self._prepare(self.app.control.broadcast( - command, - arguments=kwargs, - destination=self.destination, - callback=self.callback, - connection=self.connection, - limit=self.limit, - timeout=self.timeout, reply=True, - )) - - def report(self): - return self._request('report') - - def clock(self): - return self._request('clock') - - def active(self, safe=False): - return self._request('dump_active', safe=safe) - - def scheduled(self, safe=False): - return self._request('dump_schedule', safe=safe) - - def reserved(self, safe=False): - return self._request('dump_reserved', safe=safe) - - def stats(self): - return self._request('stats') - - def revoked(self): - return self._request('dump_revoked') - - def registered(self, *taskinfoitems): - return self._request('dump_tasks', taskinfoitems=taskinfoitems) - registered_tasks = registered - - def ping(self): - return self._request('ping') - - def active_queues(self): - return self._request('active_queues') - - def query_task(self, ids): - return self._request('query_task', ids=ids) - - def conf(self, with_defaults=False): - return self._request('dump_conf', with_defaults=with_defaults) - - def hello(self, from_node, revoked=None): - return self._request('hello', from_node=from_node, revoked=revoked) - - def memsample(self): - return self._request('memsample') - - def memdump(self, samples=10): - return self._request('memdump', samples=samples) - - def objgraph(self, type='Request', n=200, max_depth=10): - return self._request('objgraph', num=n, max_depth=max_depth, type=type) - - -class Control(object): - Mailbox = Mailbox - - def __init__(self, app=None): - self.app = app - self.mailbox = self.Mailbox('celery', type='fanout', accept=['json']) - - @cached_property - def inspect(self): - return self.app.subclass_with_self(Inspect, reverse='control.inspect') - - def purge(self, connection=None): - """Discard all waiting tasks. - - This will ignore all tasks waiting for execution, and they will - be deleted from the messaging server. - - :returns: the number of tasks discarded. 
-
-        """
-        with self.app.connection_or_acquire(connection) as conn:
-            return self.app.amqp.TaskConsumer(conn).purge()
-    discard_all = purge
-
-    def election(self, id, topic, action=None, connection=None):
-        self.broadcast('election', connection=connection, arguments={
-            'id': id, 'topic': topic, 'action': action,
-        })
-
-    def revoke(self, task_id, destination=None, terminate=False,
-               signal='SIGTERM', **kwargs):
-        """Tell all (or specific) workers to revoke a task by id.
-
-        If a task is revoked, the workers will ignore the task and
-        not execute it after all.
-
-        :param task_id: Id of the task to revoke.
-        :keyword terminate: Also terminate the process currently working
-            on the task (if any).
-        :keyword signal: Name of signal to send to process if terminate.
-            Default is TERM.
-
-        See :meth:`broadcast` for supported keyword arguments.
-
-        """
-        return self.broadcast('revoke', destination=destination,
-                              arguments={'task_id': task_id,
-                                         'terminate': terminate,
-                                         'signal': signal}, **kwargs)
-
-    def ping(self, destination=None, timeout=1, **kwargs):
-        """Ping all (or specific) workers.
-
-        Will return the list of answers.
-
-        See :meth:`broadcast` for supported keyword arguments.
-
-        """
-        return self.broadcast('ping', reply=True, destination=destination,
-                              timeout=timeout, **kwargs)
-
-    def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
-        """Tell all (or specific) workers to set a new rate limit
-        for task by type.
-
-        :param task_name: Name of task to change rate limit for.
-        :param rate_limit: The rate limit as tasks per second, or a rate limit
-            string (`'100/m'`, etc.
-            see :attr:`celery.task.base.Task.rate_limit` for
-            more information).
-
-        See :meth:`broadcast` for supported keyword arguments.
-
-        """
-        return self.broadcast('rate_limit', destination=destination,
-                              arguments={'task_name': task_name,
-                                         'rate_limit': rate_limit},
-                              **kwargs)
-
-    def add_consumer(self, queue, exchange=None, exchange_type='direct',
-                     routing_key=None, options=None, **kwargs):
-        """Tell all (or specific) workers to start consuming from a new queue.
-
-        Only the queue name is required as if only the queue is specified
-        then the exchange/routing key will be set to the same name (
-        like automatic queues do).
-
-        .. note::
-
-            This command does not respect the default queue/exchange
-            options in the configuration.
-
-        :param queue: Name of queue to start consuming from.
-        :keyword exchange: Optional name of exchange.
-        :keyword exchange_type: Type of exchange (defaults to 'direct').
-        :keyword routing_key: Optional routing key.
-        :keyword options: Additional options as supported
-            by :meth:`kombu.entity.Queue.from_dict`.
-
-        See :meth:`broadcast` for supported keyword arguments.
-
-        """
-        return self.broadcast(
-            'add_consumer',
-            arguments=dict({'queue': queue, 'exchange': exchange,
-                            'exchange_type': exchange_type,
-                            'routing_key': routing_key}, **options or {}),
-            **kwargs
-        )
-
-    def cancel_consumer(self, queue, **kwargs):
-        """Tell all (or specific) workers to stop consuming from ``queue``.
-
-        Supports the same keyword arguments as :meth:`broadcast`.
-
-        """
-        return self.broadcast(
-            'cancel_consumer', arguments={'queue': queue}, **kwargs
-        )
-
-    def time_limit(self, task_name, soft=None, hard=None, **kwargs):
-        """Tell all (or specific) workers to set time limits for
-        a task by type.
-
-        :param task_name: Name of task to change time limits for.
-        :keyword soft: New soft time limit (in seconds).
- :keyword hard: New hard time limit (in seconds). - - Any additional keyword arguments are passed on to :meth:`broadcast`. - - """ - return self.broadcast( - 'time_limit', - arguments={'task_name': task_name, - 'hard': hard, 'soft': soft}, **kwargs) - - def enable_events(self, destination=None, **kwargs): - """Tell all (or specific) workers to enable events.""" - return self.broadcast('enable_events', {}, destination, **kwargs) - - def disable_events(self, destination=None, **kwargs): - """Tell all (or specific) workers to disable events.""" - return self.broadcast('disable_events', {}, destination, **kwargs) - - def pool_grow(self, n=1, destination=None, **kwargs): - """Tell all (or specific) workers to grow the pool by ``n``. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast('pool_grow', {'n': n}, destination, **kwargs) - - def pool_shrink(self, n=1, destination=None, **kwargs): - """Tell all (or specific) workers to shrink the pool by ``n``. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs) - - def autoscale(self, max, min, destination=None, **kwargs): - """Change worker(s) autoscale setting. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast( - 'autoscale', {'max': max, 'min': min}, destination, **kwargs) - - def broadcast(self, command, arguments=None, destination=None, - connection=None, reply=False, timeout=1, limit=None, - callback=None, channel=None, **extra_kwargs): - """Broadcast a control command to the celery workers. - - :param command: Name of command to send. - :param arguments: Keyword arguments for the command. - :keyword destination: If set, a list of the hosts to send the - command to, when empty broadcast to all workers. - :keyword connection: Custom broker connection to use, if not set, - a connection will be established automatically. - :keyword reply: Wait for and return the reply. - :keyword timeout: Timeout in seconds to wait for the reply. - :keyword limit: Limit number of replies. - :keyword callback: Callback called immediately for each reply - received. - - """ - with self.app.connection_or_acquire(connection) as conn: - arguments = dict(arguments or {}, **extra_kwargs) - return self.mailbox(conn)._broadcast( - command, arguments, destination, reply, timeout, - limit, callback, channel=channel, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py b/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py deleted file mode 100644 index aa7dd45..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py +++ /dev/null @@ -1,274 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.defaults - ~~~~~~~~~~~~~~~~~~~ - - Configuration introspection and defaults. 
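Every helper in the Control class above reduces to broadcast(); the client
side looks roughly like this (the queue name, task name, and task id are
illustrative, and replies depend on workers actually running):

    from celery import Celery

    app = Celery('application', broker='amqp://guest@localhost//')

    # Thin wrappers around Control.broadcast():
    app.control.ping(timeout=0.5)
    app.control.rate_limit('application.tasks.add', '10/m')
    app.control.add_consumer('priority', reply=True)
    app.control.revoke('hypothetical-task-id', terminate=True)

    # Inspect wraps the read-only commands.
    inspector = app.control.inspect()
    print(inspector.active())      # tasks currently executing
    print(inspector.registered())  # task names known to each worker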
-
-"""
-from __future__ import absolute_import
-
-import sys
-
-from collections import deque, namedtuple
-from datetime import timedelta
-
-from celery.five import items
-from celery.utils import strtobool
-from celery.utils.functional import memoize
-
-__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']
-
-is_jython = sys.platform.startswith('java')
-is_pypy = hasattr(sys, 'pypy_version_info')
-
-DEFAULT_POOL = 'prefork'
-if is_jython:
-    DEFAULT_POOL = 'threads'
-elif is_pypy:
-    if sys.pypy_version_info[0:3] < (1, 5, 0):
-        DEFAULT_POOL = 'solo'
-    else:
-        DEFAULT_POOL = 'prefork'
-
-DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml']
-DEFAULT_PROCESS_LOG_FMT = """
-    [%(asctime)s: %(levelname)s/%(processName)s] %(message)s
-""".strip()
-DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
-DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
-%(task_name)s[%(task_id)s]: %(message)s"""
-
-_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
-               'alt': 'BROKER_URL setting'}
-_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
-              'alt': 'URL form of CELERY_RESULT_BACKEND'}
-
-searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
-
-
-# logging: processName first introduced in Py 2.6.2 (Issue #1644).
-if sys.version_info < (2, 6, 2):
-    DEFAULT_PROCESS_LOG_FMT = DEFAULT_LOG_FMT
-
-
-class Option(object):
-    alt = None
-    deprecate_by = None
-    remove_by = None
-    typemap = dict(string=str, int=int, float=float, any=lambda v: v,
-                   bool=strtobool, dict=dict, tuple=tuple)
-
-    def __init__(self, default=None, *args, **kwargs):
-        self.default = default
-        self.type = kwargs.get('type') or 'string'
-        for attr, value in items(kwargs):
-            setattr(self, attr, value)
-
-    def to_python(self, value):
-        return self.typemap[self.type](value)
-
-    def __repr__(self):
-        return '<Option: type->{0} default->{1!r}>'.format(self.type,
-                                                           self.default)
-
-NAMESPACES = {
-    'BROKER': {
-        'URL': Option(None, type='string'),
-        'CONNECTION_TIMEOUT': Option(4, type='float'),
-        'CONNECTION_RETRY': Option(True, type='bool'),
-        'CONNECTION_MAX_RETRIES': Option(100, type='int'),
-        'FAILOVER_STRATEGY': Option(None, type='string'),
-        'HEARTBEAT': Option(None, type='int'),
-        'HEARTBEAT_CHECKRATE': Option(3.0, type='int'),
-        'LOGIN_METHOD': Option(None, type='string'),
-        'POOL_LIMIT': Option(10, type='int'),
-        'USE_SSL': Option(False, type='bool'),
-        'TRANSPORT': Option(type='string'),
-        'TRANSPORT_OPTIONS': Option({}, type='dict'),
-        'HOST': Option(type='string', **_BROKER_OLD),
-        'PORT': Option(type='int', **_BROKER_OLD),
-        'USER': Option(type='string', **_BROKER_OLD),
-        'PASSWORD': Option(type='string', **_BROKER_OLD),
-        'VHOST': Option(type='string', **_BROKER_OLD),
-    },
-    'CASSANDRA': {
-        'COLUMN_FAMILY': Option(type='string'),
-        'DETAILED_MODE': Option(False, type='bool'),
-        'KEYSPACE': Option(type='string'),
-        'READ_CONSISTENCY': Option(type='string'),
-        'SERVERS': Option(type='list'),
-        'WRITE_CONSISTENCY': Option(type='string'),
-    },
-    'CELERY': {
-        'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'),
-        'ACKS_LATE': Option(False, type='bool'),
-        'ALWAYS_EAGER': Option(False, type='bool'),
-        'ANNOTATIONS': Option(type='any'),
-        'BROADCAST_QUEUE': Option('celeryctl'),
-        'BROADCAST_EXCHANGE': Option('celeryctl'),
-        'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
-        'CACHE_BACKEND': Option(),
-        'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
-        'CHORD_PROPAGATES': Option(True, type='bool'),
-        'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'),
-        'CREATE_MISSING_QUEUES':
Option(True, type='bool'), - 'DEFAULT_RATE_LIMIT': Option(type='string'), - 'DISABLE_RATE_LIMITS': Option(False, type='bool'), - 'DEFAULT_ROUTING_KEY': Option('celery'), - 'DEFAULT_QUEUE': Option('celery'), - 'DEFAULT_EXCHANGE': Option('celery'), - 'DEFAULT_EXCHANGE_TYPE': Option('direct'), - 'DEFAULT_DELIVERY_MODE': Option(2, type='string'), - 'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'), - 'ENABLE_UTC': Option(True, type='bool'), - 'ENABLE_REMOTE_CONTROL': Option(True, type='bool'), - 'EVENT_SERIALIZER': Option('json'), - 'EVENT_QUEUE_EXPIRES': Option(None, type='float'), - 'EVENT_QUEUE_TTL': Option(None, type='float'), - 'IMPORTS': Option((), type='tuple'), - 'INCLUDE': Option((), type='tuple'), - 'IGNORE_RESULT': Option(False, type='bool'), - 'MAX_CACHED_RESULTS': Option(100, type='int'), - 'MESSAGE_COMPRESSION': Option(type='string'), - 'MONGODB_BACKEND_SETTINGS': Option(type='dict'), - 'REDIS_HOST': Option(type='string', **_REDIS_OLD), - 'REDIS_PORT': Option(type='int', **_REDIS_OLD), - 'REDIS_DB': Option(type='int', **_REDIS_OLD), - 'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD), - 'REDIS_MAX_CONNECTIONS': Option(type='int'), - 'RESULT_BACKEND': Option(type='string'), - 'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'), - 'RESULT_DB_TABLENAMES': Option(type='dict'), - 'RESULT_DBURI': Option(), - 'RESULT_ENGINE_OPTIONS': Option(type='dict'), - 'RESULT_EXCHANGE': Option('celeryresults'), - 'RESULT_EXCHANGE_TYPE': Option('direct'), - 'RESULT_SERIALIZER': Option('pickle'), - 'RESULT_PERSISTENT': Option(None, type='bool'), - 'ROUTES': Option(type='any'), - 'SEND_EVENTS': Option(False, type='bool'), - 'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'), - 'SEND_TASK_SENT_EVENT': Option(False, type='bool'), - 'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'), - 'TASK_PUBLISH_RETRY': Option(True, type='bool'), - 'TASK_PUBLISH_RETRY_POLICY': Option({ - 'max_retries': 3, - 'interval_start': 0, - 'interval_max': 1, - 'interval_step': 0.2}, type='dict'), - 'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'), - 'TASK_SERIALIZER': Option('pickle'), - 'TIMEZONE': Option(type='string'), - 'TRACK_STARTED': Option(False, type='bool'), - 'REDIRECT_STDOUTS': Option(True, type='bool'), - 'REDIRECT_STDOUTS_LEVEL': Option('WARNING'), - 'QUEUES': Option(type='dict'), - 'QUEUE_HA_POLICY': Option(None, type='string'), - 'SECURITY_KEY': Option(type='string'), - 'SECURITY_CERTIFICATE': Option(type='string'), - 'SECURITY_CERT_STORE': Option(type='string'), - 'WORKER_DIRECT': Option(False, type='bool'), - }, - 'CELERYD': { - 'AGENT': Option(None, type='string'), - 'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'), - 'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'), - 'CONCURRENCY': Option(0, type='int'), - 'TIMER': Option(type='string'), - 'TIMER_PRECISION': Option(1.0, type='float'), - 'FORCE_EXECV': Option(False, type='bool'), - 'HIJACK_ROOT_LOGGER': Option(True, type='bool'), - 'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'), - 'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT), - 'LOG_COLOR': Option(type='bool'), - 'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - 'MAX_TASKS_PER_CHILD': Option(type='int'), - 'POOL': Option(DEFAULT_POOL), - 'POOL_PUTLOCKS': Option(True, type='bool'), - 'POOL_RESTARTS': Option(False, type='bool'), - 'PREFETCH_MULTIPLIER': Option(4, type='int'), - 
'STATE_DB': Option(), - 'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT), - 'TASK_SOFT_TIME_LIMIT': Option(type='float'), - 'TASK_TIME_LIMIT': Option(type='float'), - 'WORKER_LOST_WAIT': Option(10.0, type='float') - }, - 'CELERYBEAT': { - 'SCHEDULE': Option({}, type='dict'), - 'SCHEDULER': Option('celery.beat:PersistentScheduler'), - 'SCHEDULE_FILENAME': Option('celerybeat-schedule'), - 'SYNC_EVERY': Option(0, type='int'), - 'MAX_LOOP_INTERVAL': Option(0, type='float'), - 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - }, - 'CELERYMON': { - 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - 'LOG_FORMAT': Option(DEFAULT_LOG_FMT), - }, - 'EMAIL': { - 'HOST': Option('localhost'), - 'PORT': Option(25, type='int'), - 'HOST_USER': Option(), - 'HOST_PASSWORD': Option(), - 'TIMEOUT': Option(2, type='float'), - 'USE_SSL': Option(False, type='bool'), - 'USE_TLS': Option(False, type='bool'), - }, - 'SERVER_EMAIL': Option('celery@localhost'), - 'ADMINS': Option((), type='tuple'), -} - - -def flatten(d, ns=''): - stack = deque([(ns, d)]) - while stack: - name, space = stack.popleft() - for key, value in items(space): - if isinstance(value, dict): - stack.append((name + key + '_', value)) - else: - yield name + key, value -DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES)) - - -def find_deprecated_settings(source): - from celery.utils import warn_deprecated - for name, opt in flatten(NAMESPACES): - if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): - warn_deprecated(description='The {0!r} setting'.format(name), - deprecation=opt.deprecate_by, - removal=opt.remove_by, - alternative='Use the {0.alt} instead'.format(opt)) - return source - - -@memoize(maxsize=None) -def find(name, namespace='celery'): - # - Try specified namespace first. - namespace = namespace.upper() - try: - return searchresult( - namespace, name.upper(), NAMESPACES[namespace][name.upper()], - ) - except KeyError: - # - Try all the other namespaces. - for ns, keys in items(NAMESPACES): - if ns.upper() == name.upper(): - return searchresult(None, ns, keys) - elif isinstance(keys, dict): - try: - return searchresult(ns, name.upper(), keys[name.upper()]) - except KeyError: - pass - # - See if name is a qualname last. - return searchresult(None, name.upper(), DEFAULTS[name.upper()]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/log.py b/thesisenv/lib/python3.6/site-packages/celery/app/log.py deleted file mode 100644 index 3d350e9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/log.py +++ /dev/null @@ -1,257 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.log - ~~~~~~~~~~~~~~ - - The Celery instances logging section: ``Celery.log``. - - Sets up logging for the worker and other programs, - redirects stdouts, colors log output, patches logging - related compatibility fixes, and so on. 
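flatten() and find() above are how the flat CELERY_*/BROKER_* names map onto
this nested namespace tree. A small introspection sketch, assuming the
celery 3.1 module is still importable:

    from celery.app.defaults import DEFAULTS, NAMESPACES, find, flatten

    # flatten() joins namespace and key with '_', producing the flat
    # names such as BROKER_POOL_LIMIT used throughout the codebase.
    flat = dict(flatten(NAMESPACES))
    print(flat['BROKER_POOL_LIMIT'].default)   # 10

    # find() resolves a bare key to (namespace, key, Option).
    match = find('pool_limit', namespace='broker')
    print(match.namespace, match.key, match.type.default)

    # DEFAULTS is the flat {name: default} mapping used to seed the
    # Settings object.
    print(DEFAULTS['CELERY_TASK_SERIALIZER'])  # 'pickle'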
- -""" -from __future__ import absolute_import - -import logging -import os -import sys - -from logging.handlers import WatchedFileHandler - -from kombu.log import NullHandler -from kombu.utils.encoding import set_default_encoding_file - -from celery import signals -from celery._state import get_current_task -from celery.five import class_property, string_t -from celery.utils import isatty, node_format -from celery.utils.log import ( - get_logger, mlevel, - ColorFormatter, ensure_process_aware_logger, - LoggingProxy, get_multiprocessing_logger, - reset_multiprocessing_logger, -) -from celery.utils.term import colored - -__all__ = ['TaskFormatter', 'Logging'] - -MP_LOG = os.environ.get('MP_LOG', False) - - -class TaskFormatter(ColorFormatter): - - def format(self, record): - task = get_current_task() - if task and task.request: - record.__dict__.update(task_id=task.request.id, - task_name=task.name) - else: - record.__dict__.setdefault('task_name', '???') - record.__dict__.setdefault('task_id', '???') - return ColorFormatter.format(self, record) - - -class Logging(object): - #: The logging subsystem is only configured once per process. - #: setup_logging_subsystem sets this flag, and subsequent calls - #: will do nothing. - _setup = False - - def __init__(self, app): - self.app = app - self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL) - self.format = self.app.conf.CELERYD_LOG_FORMAT - self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT - self.colorize = self.app.conf.CELERYD_LOG_COLOR - - def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, - redirect_level='WARNING', colorize=None, hostname=None): - handled = self.setup_logging_subsystem( - loglevel, logfile, colorize=colorize, hostname=hostname, - ) - if not handled: - if redirect_stdouts: - self.redirect_stdouts(redirect_level) - os.environ.update( - CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', - CELERY_LOG_FILE=str(logfile) if logfile else '', - ) - return handled - - def redirect_stdouts(self, loglevel=None, name='celery.redirected'): - self.redirect_stdouts_to_logger( - get_logger(name), loglevel=loglevel - ) - os.environ.update( - CELERY_LOG_REDIRECT='1', - CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), - ) - - def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, - colorize=None, hostname=None, **kwargs): - if self.already_setup: - return - if logfile and hostname: - logfile = node_format(logfile, hostname) - self.already_setup = True - loglevel = mlevel(loglevel or self.loglevel) - format = format or self.format - colorize = self.supports_color(colorize, logfile) - reset_multiprocessing_logger() - ensure_process_aware_logger() - receivers = signals.setup_logging.send( - sender=None, loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - - if not receivers: - root = logging.getLogger() - - if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: - root.handlers = [] - get_logger('celery').handlers = [] - get_logger('celery.task').handlers = [] - get_logger('celery.redirected').handlers = [] - - # Configure root logger - self._configure_logger( - root, logfile, loglevel, format, colorize, **kwargs - ) - - # Configure the multiprocessing logger - self._configure_logger( - get_multiprocessing_logger(), - logfile, loglevel if MP_LOG else logging.ERROR, - format, colorize, **kwargs - ) - - signals.after_setup_logger.send( - sender=None, logger=root, - loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - - # then setup the root task logger. 
- self.setup_task_loggers(loglevel, logfile, colorize=colorize) - - try: - stream = logging.getLogger().handlers[0].stream - except (AttributeError, IndexError): - pass - else: - set_default_encoding_file(stream) - - # This is a hack for multiprocessing's fork+exec, so that - # logging before Process.run works. - logfile_name = logfile if isinstance(logfile, string_t) else '' - os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), - _MP_FORK_LOGFILE_=logfile_name, - _MP_FORK_LOGFORMAT_=format) - return receivers - - def _configure_logger(self, logger, logfile, loglevel, - format, colorize, **kwargs): - if logger is not None: - self.setup_handlers(logger, logfile, format, - colorize, **kwargs) - if loglevel: - logger.setLevel(loglevel) - - def setup_task_loggers(self, loglevel=None, logfile=None, format=None, - colorize=None, propagate=False, **kwargs): - """Setup the task logger. - - If `logfile` is not specified, then `sys.stderr` is used. - - Will return the base task logger object. - - """ - loglevel = mlevel(loglevel or self.loglevel) - format = format or self.task_format - colorize = self.supports_color(colorize, logfile) - - logger = self.setup_handlers( - get_logger('celery.task'), - logfile, format, colorize, - formatter=TaskFormatter, **kwargs - ) - logger.setLevel(loglevel) - # this is an int for some reason, better not question why. - logger.propagate = int(propagate) - signals.after_setup_task_logger.send( - sender=None, logger=logger, - loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - return logger - - def redirect_stdouts_to_logger(self, logger, loglevel=None, - stdout=True, stderr=True): - """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a - logging instance. - - :param logger: The :class:`logging.Logger` instance to redirect to. - :param loglevel: The loglevel redirected messages will be logged as. - - """ - proxy = LoggingProxy(logger, loglevel) - if stdout: - sys.stdout = proxy - if stderr: - sys.stderr = proxy - return proxy - - def supports_color(self, colorize=None, logfile=None): - colorize = self.colorize if colorize is None else colorize - if self.app.IS_WINDOWS: - # Windows does not support ANSI color codes. - return False - if colorize or colorize is None: - # Only use color if there is no active log file - # and stderr is an actual terminal. 
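Outside the worker, the Logging machinery above is typically driven by hand;
a sketch, assuming celery 3.1:

    import logging

    from celery import Celery
    from celery.utils.log import get_logger

    app = Celery('application')

    # Configures the root and celery.task loggers once per process;
    # later calls are no-ops thanks to the already_setup flag.
    app.log.setup_logging_subsystem(loglevel=logging.INFO, logfile=None)

    # Optionally capture stdout/stderr from tasks as log records.
    logger = get_logger('celery.redirected')
    app.log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)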
- return logfile is None and isatty(sys.stderr) - return colorize - - def colored(self, logfile=None, enabled=None): - return colored(enabled=self.supports_color(enabled, logfile)) - - def setup_handlers(self, logger, logfile, format, colorize, - formatter=ColorFormatter, **kwargs): - if self._is_configured(logger): - return logger - handler = self._detect_handler(logfile) - handler.setFormatter(formatter(format, use_color=colorize)) - logger.addHandler(handler) - return logger - - def _detect_handler(self, logfile=None): - """Create log handler with either a filename, an open stream - or :const:`None` (stderr).""" - logfile = sys.__stderr__ if logfile is None else logfile - if hasattr(logfile, 'write'): - return logging.StreamHandler(logfile) - return WatchedFileHandler(logfile) - - def _has_handler(self, logger): - if logger.handlers: - return any(not isinstance(h, NullHandler) for h in logger.handlers) - - def _is_configured(self, logger): - return self._has_handler(logger) and not getattr( - logger, '_rudimentary_setup', False) - - def setup_logger(self, name='celery', *args, **kwargs): - """Deprecated: No longer used.""" - self.setup_logging_subsystem(*args, **kwargs) - return logging.root - - def get_default_logger(self, name='celery', **kwargs): - return get_logger(name) - - @class_property - def already_setup(cls): - return cls._setup - - @already_setup.setter # noqa - def already_setup(cls, was_setup): - cls._setup = was_setup diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/registry.py b/thesisenv/lib/python3.6/site-packages/celery/app/registry.py deleted file mode 100644 index 7046554..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/registry.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.registry - ~~~~~~~~~~~~~~~~~~~ - - Registry of available tasks. - -""" -from __future__ import absolute_import - -import inspect - -from importlib import import_module - -from celery._state import get_current_app -from celery.exceptions import NotRegistered -from celery.five import items - -__all__ = ['TaskRegistry'] - - -class TaskRegistry(dict): - NotRegistered = NotRegistered - - def __missing__(self, key): - raise self.NotRegistered(key) - - def register(self, task): - """Register a task in the task registry. - - The task will be automatically instantiated if not already an - instance. - - """ - self[task.name] = inspect.isclass(task) and task() or task - - def unregister(self, name): - """Unregister task by name. - - :param name: name of the task to unregister, or a - :class:`celery.task.base.Task` with a valid `name` attribute. - - :raises celery.exceptions.NotRegistered: if the task has not - been registered. 
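TaskRegistry above is a plain dict keyed by task name, which is what
app.tasks exposes. A lookup sketch (the example task is hypothetical):

    from celery import Celery
    from celery.exceptions import NotRegistered

    app = Celery('application')

    @app.task
    def add(x, y):
        return x + y

    # Accessing app.tasks finalizes the app; built-in tasks such as
    # celery.backend_cleanup are registered at that point too.
    print(sorted(app.tasks))
    cleanup = app.tasks['celery.backend_cleanup']

    try:
        app.tasks['no.such.task']
    except NotRegistered as exc:
        print('unknown task: {0}'.format(exc))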
- - """ - try: - self.pop(getattr(name, 'name', name)) - except KeyError: - raise self.NotRegistered(name) - - # -- these methods are irrelevant now and will be removed in 4.0 - def regular(self): - return self.filter_types('regular') - - def periodic(self): - return self.filter_types('periodic') - - def filter_types(self, type): - return dict((name, task) for name, task in items(self) - if getattr(task, 'type', 'regular') == type) - - -def _unpickle_task(name): - return get_current_app().tasks[name] - - -def _unpickle_task_v2(name, module=None): - if module: - import_module(module) - return get_current_app().tasks[name] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/routes.py b/thesisenv/lib/python3.6/site-packages/celery/app/routes.py deleted file mode 100644 index b1e7314..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/routes.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.routes - ~~~~~~~~~~~~~ - - Contains utilities for working with task routers, - (:setting:`CELERY_ROUTES`). - -""" -from __future__ import absolute_import - -from celery.exceptions import QueueNotFound -from celery.five import string_t -from celery.utils import lpmerge -from celery.utils.functional import firstmethod, mlazy -from celery.utils.imports import instantiate - -__all__ = ['MapRoute', 'Router', 'prepare'] - -_first_route = firstmethod('route_for_task') - - -class MapRoute(object): - """Creates a router out of a :class:`dict`.""" - - def __init__(self, map): - self.map = map - - def route_for_task(self, task, *args, **kwargs): - try: - return dict(self.map[task]) - except KeyError: - pass - except ValueError: - return {'queue': self.map[task]} - - -class Router(object): - - def __init__(self, routes=None, queues=None, - create_missing=False, app=None): - self.app = app - self.queues = {} if queues is None else queues - self.routes = [] if routes is None else routes - self.create_missing = create_missing - - def route(self, options, task, args=(), kwargs={}): - options = self.expand_destination(options) # expands 'queue' - if self.routes: - route = self.lookup_route(task, args, kwargs) - if route: # expands 'queue' in route. - return lpmerge(self.expand_destination(route), options) - if 'queue' not in options: - options = lpmerge(self.expand_destination( - self.app.conf.CELERY_DEFAULT_QUEUE), options) - return options - - def expand_destination(self, route): - # Route can be a queue name: convenient for direct exchanges. - if isinstance(route, string_t): - queue, route = route, {} - else: - # can use defaults from configured queue, but override specific - # things (like the routing_key): great for topic exchanges. 
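Concretely, the Router above turns a CELERY_ROUTES entry into per-message
options; a sketch with hypothetical queue and task names:

    from celery import Celery

    app = Celery('application', broker='amqp://guest@localhost//')

    # A dict route becomes a MapRoute; a plain string value would be
    # shorthand for {'queue': <name>}.
    app.conf.update(CELERY_ROUTES={
        'application.tasks.compress_video': {
            'queue': 'video',
            'routing_key': 'video.compress',
        },
    })

    # route() merges the matched route into the message options and
    # expands 'queue' into a declared kombu Queue (created on demand
    # while CELERY_CREATE_MISSING_QUEUES is enabled).
    options = app.amqp.router.route(
        {}, 'application.tasks.compress_video')
    print(options['queue'].name)  # 'video'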
- queue = route.pop('queue', None) - - if queue: - try: - Q = self.queues[queue] # noqa - except KeyError: - raise QueueNotFound( - 'Queue {0!r} missing from CELERY_QUEUES'.format(queue)) - # needs to be declared by publisher - route['queue'] = Q - return route - - def lookup_route(self, task, args=None, kwargs=None): - return _first_route(self.routes, task, args, kwargs) - - -def prepare(routes): - """Expands the :setting:`CELERY_ROUTES` setting.""" - - def expand_route(route): - if isinstance(route, dict): - return MapRoute(route) - if isinstance(route, string_t): - return mlazy(instantiate, route) - return route - - if routes is None: - return () - if not isinstance(routes, (list, tuple)): - routes = (routes, ) - return [expand_route(route) for route in routes] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/task.py b/thesisenv/lib/python3.6/site-packages/celery/app/task.py deleted file mode 100644 index 3360005..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/task.py +++ /dev/null @@ -1,948 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.task - ~~~~~~~~~~~~~~~ - - Task Implementation: Task request context, and the base task class. - -""" -from __future__ import absolute_import - -import sys - -from billiard.einfo import ExceptionInfo - -from celery import current_app -from celery import states -from celery._state import _task_stack -from celery.canvas import signature -from celery.exceptions import MaxRetriesExceededError, Reject, Retry -from celery.five import class_property, items, with_metaclass -from celery.local import Proxy -from celery.result import EagerResult -from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise -from celery.utils.functional import mattrgetter, maybe_list -from celery.utils.imports import instantiate -from celery.utils.mail import ErrorMail - -from .annotations import resolve_all as resolve_all_annotations -from .registry import _unpickle_task_v2 -from .utils import appstr - -__all__ = ['Context', 'Task'] - -#: extracts attributes related to publishing a message from an object. 
-extract_exec_options = mattrgetter(
-    'queue', 'routing_key', 'exchange', 'priority', 'expires',
-    'serializer', 'delivery_mode', 'compression', 'time_limit',
-    'soft_time_limit', 'immediate', 'mandatory',  # imm+man is deprecated
-)
-
-# We take __repr__ very seriously around here ;)
-R_BOUND_TASK = '<class {0.__name__} of {app}{flags}>'
-R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>'
-R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>'
-R_INSTANCE = '<@task: {0.name} of {app}{flags}>'
-
-
-class _CompatShared(object):
-
-    def __init__(self, name, cons):
-        self.name = name
-        self.cons = cons
-
-    def __hash__(self):
-        return hash(self.name)
-
-    def __repr__(self):
-        return '<OldTask: %r>' % (self.name, )
-
-    def __call__(self, app):
-        return self.cons(app)
-
-
-def _strflags(flags, default=''):
-    if flags:
-        return ' ({0})'.format(', '.join(flags))
-    return default
-
-
-def _reprtask(task, fmt=None, flags=None):
-    flags = list(flags) if flags is not None else []
-    flags.append('v2 compatible') if task.__v2_compat__ else None
-    if not fmt:
-        fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK
-    return fmt.format(
-        task, flags=_strflags(flags),
-        app=appstr(task._app) if task._app else None,
-    )
-
-
-class Context(object):
-    # Default context
-    logfile = None
-    loglevel = None
-    hostname = None
-    id = None
-    args = None
-    kwargs = None
-    retries = 0
-    eta = None
-    expires = None
-    is_eager = False
-    headers = None
-    delivery_info = None
-    reply_to = None
-    correlation_id = None
-    taskset = None  # compat alias to group
-    group = None
-    chord = None
-    utc = None
-    called_directly = True
-    callbacks = None
-    errbacks = None
-    timelimit = None
-    _children = None  # see property
-    _protected = 0
-
-    def __init__(self, *args, **kwargs):
-        self.update(*args, **kwargs)
-
-    def update(self, *args, **kwargs):
-        return self.__dict__.update(*args, **kwargs)
-
-    def clear(self):
-        return self.__dict__.clear()
-
-    def get(self, key, default=None):
-        return getattr(self, key, default)
-
-    def __repr__(self):
-        return '<Context: {0!r}>'.format(vars(self))
-
-    @property
-    def children(self):
-        # children must be an empty list for every thread
-        if self._children is None:
-            self._children = []
-        return self._children
-
-
-class TaskType(type):
-    """Meta class for tasks.
-
-    Automatically registers the task in the task registry (except
-    if the :attr:`Task.abstract` attribute is set).
-
-    If no :attr:`Task.name` attribute is provided, then the name is generated
-    from the module and class name.
-
-    """
-    _creation_count = {}  # used by old non-abstract task classes
-
-    def __new__(cls, name, bases, attrs):
-        new = super(TaskType, cls).__new__
-        task_module = attrs.get('__module__') or '__main__'
-
-        # - Abstract class: abstract attribute should not be inherited.
-        abstract = attrs.pop('abstract', None)
-        if abstract or not attrs.get('autoregister', True):
-            return new(cls, name, bases, attrs)
-
-        # The 'app' attribute is now a property, with the real app located
-        # in the '_app' attribute. Previously this was a regular attribute,
-        # so we should support classes defining it.
-        app = attrs.pop('_app', None) or attrs.pop('app', None)
-
-        # Attempt to inherit app from one of the bases
-        if not isinstance(app, Proxy) and app is None:
-            for base in bases:
-                if getattr(base, '_app', None):
-                    app = base._app
-                    break
-            else:
-                app = current_app._get_current_object()
-        attrs['_app'] = app
-
-        # - Automatically generate missing/empty name.
- task_name = attrs.get('name') - if not task_name: - attrs['name'] = task_name = gen_task_name(app, name, task_module) - - if not attrs.get('_decorated'): - # non decorated tasks must also be shared in case - # an app is created multiple times due to modules - # imported under multiple names. - # Hairy stuff, here to be compatible with 2.x. - # People should not use non-abstract task classes anymore, - # use the task decorator. - from celery._state import connect_on_app_finalize - unique_name = '.'.join([task_module, name]) - if unique_name not in cls._creation_count: - # the creation count is used as a safety - # so that the same task is not added recursively - # to the set of constructors. - cls._creation_count[unique_name] = 1 - connect_on_app_finalize(_CompatShared( - unique_name, - lambda app: TaskType.__new__(cls, name, bases, - dict(attrs, _app=app)), - )) - - # - Create and register class. - # Because of the way import happens (recursively) - # we may or may not be the first time the task tries to register - # with the framework. There should only be one class for each task - # name, so we always return the registered version. - tasks = app._tasks - if task_name not in tasks: - tasks.register(new(cls, name, bases, attrs)) - instance = tasks[task_name] - instance.bind(app) - return instance.__class__ - - def __repr__(cls): - return _reprtask(cls) - - -@with_metaclass(TaskType) -class Task(object): - """Task base class. - - When called tasks apply the :meth:`run` method. This method must - be defined by all tasks (that is unless the :meth:`__call__` method - is overridden). - - """ - __trace__ = None - __v2_compat__ = False # set by old base in celery.task.base - - ErrorMail = ErrorMail - MaxRetriesExceededError = MaxRetriesExceededError - - #: Execution strategy used, or the qualified name of one. - Strategy = 'celery.worker.strategy:default' - - #: This is the instance bound to if the task is a method of a class. - __self__ = None - - #: The application instance associated with this task class. - _app = None - - #: Name of the task. - name = None - - #: If :const:`True` the task is an abstract base class. - abstract = True - - #: If disabled the worker will not forward magic keyword arguments. - #: Deprecated and scheduled for removal in v4.0. - accept_magic_kwargs = False - - #: Maximum number of retries before giving up. If set to :const:`None`, - #: it will **never** stop retrying. - max_retries = 3 - - #: Default time in seconds before a retry of the task should be - #: executed. 3 minutes by default. - default_retry_delay = 3 * 60 - - #: Rate limit for this task type. Examples: :const:`None` (no rate - #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks - #: a minute),`'100/h'` (hundred tasks an hour) - rate_limit = None - - #: If enabled the worker will not store task state and return values - #: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT` - #: setting. - ignore_result = None - - #: If enabled the request will keep track of subtasks started by - #: this task, and this information will be sent with the result - #: (``result.children``). - trail = True - - #: If enabled the worker will send monitoring events related to - #: this task (but only if the worker is configured to send - #: task related events). - #: Note that this has no effect on the task-failure event case - #: where a task is not registered (as it will have no task class - #: to check this flag). 
- send_events = True - - #: When enabled errors will be stored even if the task is otherwise - #: configured to ignore results. - store_errors_even_if_ignored = None - - #: If enabled an email will be sent to :setting:`ADMINS` whenever a task - #: of this type fails. - send_error_emails = None - - #: The name of a serializer that are registered with - #: :mod:`kombu.serialization.registry`. Default is `'pickle'`. - serializer = None - - #: Hard time limit. - #: Defaults to the :setting:`CELERYD_TASK_TIME_LIMIT` setting. - time_limit = None - - #: Soft time limit. - #: Defaults to the :setting:`CELERYD_TASK_SOFT_TIME_LIMIT` setting. - soft_time_limit = None - - #: The result store backend used for this task. - backend = None - - #: If disabled this task won't be registered automatically. - autoregister = True - - #: If enabled the task will report its status as 'started' when the task - #: is executed by a worker. Disabled by default as the normal behaviour - #: is to not report that level of granularity. Tasks are either pending, - #: finished, or waiting to be retried. - #: - #: Having a 'started' status can be useful for when there are long - #: running tasks and there is a need to report which task is currently - #: running. - #: - #: The application default can be overridden using the - #: :setting:`CELERY_TRACK_STARTED` setting. - track_started = None - - #: When enabled messages for this task will be acknowledged **after** - #: the task has been executed, and not *just before* which is the - #: default behavior. - #: - #: Please note that this means the task may be executed twice if the - #: worker crashes mid execution (which may be acceptable for some - #: applications). - #: - #: The application default can be overridden with the - #: :setting:`CELERY_ACKS_LATE` setting. - acks_late = None - - #: Tuple of expected exceptions. - #: - #: These are errors that are expected in normal operation - #: and that should not be regarded as a real error by the worker. - #: Currently this means that the state will be updated to an error - #: state, but the worker will not log the event as an error. - throws = () - - #: Default task expiry time. - expires = None - - #: Some may expect a request to exist even if the task has not been - #: called. This should probably be deprecated. - _default_request = None - - _exec_options = None - - __bound__ = False - - from_config = ( - ('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'), - ('serializer', 'CELERY_TASK_SERIALIZER'), - ('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'), - ('track_started', 'CELERY_TRACK_STARTED'), - ('acks_late', 'CELERY_ACKS_LATE'), - ('ignore_result', 'CELERY_IGNORE_RESULT'), - ('store_errors_even_if_ignored', - 'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'), - ) - - _backend = None # set by backend property. - - __bound__ = False - - # - Tasks are lazily bound, so that configuration is not set - # - until the task is actually used - - @classmethod - def bind(self, app): - was_bound, self.__bound__ = self.__bound__, True - self._app = app - conf = app.conf - self._exec_options = None # clear option cache - - for attr_name, config_name in self.from_config: - if getattr(self, attr_name, None) is None: - setattr(self, attr_name, conf[config_name]) - if self.accept_magic_kwargs is None: - self.accept_magic_kwargs = app.accept_magic_kwargs - - # decorate with annotations from config. 
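# ----------------------------------------------------------------------
# The from_config table above fills any of these attributes still None
# from the matching CELERY_* setting when the task is bound to an app;
# options passed to the task decorator become class attributes and take
# precedence.  A hedged sketch, reusing the hypothetical `app` from the
# earlier note:

app.conf.CELERY_ACKS_LATE = True       # class-wide default via from_config

@app.task(max_retries=5, default_retry_delay=30,
          rate_limit='10/m', ignore_result=True)
def fetch(url):
    """Download `url` (body elided; illustration only)."""
    ...
# ----------------------------------------------------------------------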
- if not was_bound: - self.annotate() - - from celery.utils.threads import LocalStack - self.request_stack = LocalStack() - - # PeriodicTask uses this to add itself to the PeriodicTask schedule. - self.on_bound(app) - - return app - - @classmethod - def on_bound(self, app): - """This method can be defined to do additional actions when the - task class is bound to an app.""" - pass - - @classmethod - def _get_app(self): - if self._app is None: - self._app = current_app - if not self.__bound__: - # The app property's __set__ method is not called - # if Task.app is set (on the class), so must bind on use. - self.bind(self._app) - return self._app - app = class_property(_get_app, bind) - - @classmethod - def annotate(self): - for d in resolve_all_annotations(self.app.annotations, self): - for key, value in items(d): - if key.startswith('@'): - self.add_around(key[1:], value) - else: - setattr(self, key, value) - - @classmethod - def add_around(self, attr, around): - orig = getattr(self, attr) - if getattr(orig, '__wrapped__', None): - orig = orig.__wrapped__ - meth = around(orig) - meth.__wrapped__ = orig - setattr(self, attr, meth) - - def __call__(self, *args, **kwargs): - _task_stack.push(self) - self.push_request() - try: - # add self if this is a bound task - if self.__self__ is not None: - return self.run(self.__self__, *args, **kwargs) - return self.run(*args, **kwargs) - finally: - self.pop_request() - _task_stack.pop() - - def __reduce__(self): - # - tasks are pickled into the name of the task only, and the reciever - # - simply grabs it from the local registry. - # - in later versions the module of the task is also included, - # - and the receiving side tries to import that module so that - # - it will work even if the task has not been registered. - mod = type(self).__module__ - mod = mod if mod and mod in sys.modules else None - return (_unpickle_task_v2, (self.name, mod), None) - - def run(self, *args, **kwargs): - """The body of the task executed by workers.""" - raise NotImplementedError('Tasks must define the run method.') - - def start_strategy(self, app, consumer, **kwargs): - return instantiate(self.Strategy, self, app, consumer, **kwargs) - - def delay(self, *args, **kwargs): - """Star argument version of :meth:`apply_async`. - - Does not support the extra options enabled by :meth:`apply_async`. - - :param \*args: positional arguments passed on to the task. - :param \*\*kwargs: keyword arguments passed on to the task. - - :returns :class:`celery.result.AsyncResult`: - - """ - return self.apply_async(args, kwargs) - - def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, - link=None, link_error=None, **options): - """Apply tasks asynchronously by sending a message. - - :keyword args: The positional arguments to pass on to the - task (a :class:`list` or :class:`tuple`). - - :keyword kwargs: The keyword arguments to pass on to the - task (a :class:`dict`) - - :keyword countdown: Number of seconds into the future that the - task should execute. Defaults to immediate - execution. - - :keyword eta: A :class:`~datetime.datetime` object describing - the absolute time and date of when the task should - be executed. May not be specified if `countdown` - is also supplied. - - :keyword expires: Either a :class:`int`, describing the number of - seconds, or a :class:`~datetime.datetime` object - that describes the absolute time and date of when - the task should expire. The task will not be - executed after the expiration time. 
- - :keyword connection: Re-use existing broker connection instead - of establishing a new one. - - :keyword retry: If enabled sending of the task message will be retried - in the event of connection loss or failure. Default - is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` - setting. Note that you need to handle the - producer/connection manually for this to work. - - :keyword retry_policy: Override the retry policy used. See the - :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` - setting. - - :keyword routing_key: Custom routing key used to route the task to a - worker server. If in combination with a - ``queue`` argument only used to specify custom - routing keys to topic exchanges. - - :keyword queue: The queue to route the task to. This must be a key - present in :setting:`CELERY_QUEUES`, or - :setting:`CELERY_CREATE_MISSING_QUEUES` must be - enabled. See :ref:`guide-routing` for more - information. - - :keyword exchange: Named custom exchange to send the task to. - Usually not used in combination with the ``queue`` - argument. - - :keyword priority: The task priority, a number between 0 and 9. - Defaults to the :attr:`priority` attribute. - - :keyword serializer: A string identifying the default - serialization method to use. Can be `pickle`, - `json`, `yaml`, `msgpack` or any custom - serialization method that has been registered - with :mod:`kombu.serialization.registry`. - Defaults to the :attr:`serializer` attribute. - - :keyword compression: A string identifying the compression method - to use. Can be one of ``zlib``, ``bzip2``, - or any custom compression methods registered with - :func:`kombu.compression.register`. Defaults to - the :setting:`CELERY_MESSAGE_COMPRESSION` - setting. - :keyword link: A single, or a list of tasks to apply if the - task exits successfully. - :keyword link_error: A single, or a list of tasks to apply - if an error occurs while executing the task. - - :keyword producer: :class:~@amqp.TaskProducer` instance to use. - - :keyword add_to_parent: If set to True (default) and the task - is applied while executing another task, then the result - will be appended to the parent tasks ``request.children`` - attribute. Trailing can also be disabled by default using the - :attr:`trail` attribute - - :keyword publisher: Deprecated alias to ``producer``. - - :keyword headers: Message headers to be sent in the - task (a :class:`dict`) - - :rtype :class:`celery.result.AsyncResult`: if - :setting:`CELERY_ALWAYS_EAGER` is not set, otherwise - :class:`celery.result.EagerResult`. - - Also supports all keyword arguments supported by - :meth:`kombu.Producer.publish`. - - .. note:: - If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will - be replaced by a local :func:`apply` call instead. - - """ - app = self._get_app() - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, task_id=task_id or uuid(), - link=link, link_error=link_error, **options) - # add 'self' if this is a "task_method". 
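# ----------------------------------------------------------------------
# delay() is the star-argument shortcut documented above, while
# apply_async() exposes the execution options.  A sketch reusing the
# hypothetical `add` task from the earlier note:

result = add.delay(2, 2)          # same as add.apply_async((2, 2))

result = add.apply_async(
    (2, 2),
    countdown=10,        # run no earlier than 10 seconds from now
    expires=300,         # drop the task if not started within 5 minutes
    queue='math',        # must be in CELERY_QUEUES unless auto-creation is on
    serializer='json',
)
print(result.get(timeout=30))     # -> 4
# ----------------------------------------------------------------------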
- if self.__self__ is not None: - args = args if isinstance(args, tuple) else tuple(args or ()) - args = (self.__self__, ) + args - return app.send_task( - self.name, args, kwargs, task_id=task_id, producer=producer, - link=link, link_error=link_error, result_cls=self.AsyncResult, - **dict(self._get_exec_options(), **options) - ) - - def subtask_from_request(self, request=None, args=None, kwargs=None, - queue=None, **extra_options): - request = self.request if request is None else request - args = request.args if args is None else args - kwargs = request.kwargs if kwargs is None else kwargs - limit_hard, limit_soft = request.timelimit or (None, None) - options = { - 'task_id': request.id, - 'link': request.callbacks, - 'link_error': request.errbacks, - 'group_id': request.group, - 'chord': request.chord, - 'soft_time_limit': limit_soft, - 'time_limit': limit_hard, - 'reply_to': request.reply_to, - 'headers': request.headers, - } - options.update( - {'queue': queue} if queue else (request.delivery_info or {}) - ) - return self.subtask(args, kwargs, options, type=self, **extra_options) - - def retry(self, args=None, kwargs=None, exc=None, throw=True, - eta=None, countdown=None, max_retries=None, **options): - """Retry the task. - - :param args: Positional arguments to retry with. - :param kwargs: Keyword arguments to retry with. - :keyword exc: Custom exception to report when the max restart - limit has been exceeded (default: - :exc:`~@MaxRetriesExceededError`). - - If this argument is set and retry is called while - an exception was raised (``sys.exc_info()`` is set) - it will attempt to reraise the current exception. - - If no exception was raised it will raise the ``exc`` - argument provided. - :keyword countdown: Time in seconds to delay the retry for. - :keyword eta: Explicit time and date to run the retry at - (must be a :class:`~datetime.datetime` instance). - :keyword max_retries: If set, overrides the default retry limit for - this execution. Changes to this parameter do not propagate to - subsequent task retry attempts. A value of :const:`None`, means - "use the default", so if you want infinite retries you would - have to set the :attr:`max_retries` attribute of the task to - :const:`None` first. - :keyword time_limit: If set, overrides the default time limit. - :keyword soft_time_limit: If set, overrides the default soft - time limit. - :keyword \*\*options: Any extra options to pass on to - meth:`apply_async`. - :keyword throw: If this is :const:`False`, do not raise the - :exc:`~@Retry` exception, - that tells the worker to mark the task as being - retried. Note that this means the task will be - marked as failed if the task raises an exception, - or successful if it returns. - - :raises celery.exceptions.Retry: To tell the worker that - the task has been re-sent for retry. This always happens, - unless the `throw` keyword argument has been explicitly set - to :const:`False`, and is considered normal operation. - - **Example** - - .. code-block:: python - - >>> from imaginary_twitter_lib import Twitter - >>> from proj.celery import app - - >>> @app.task(bind=True) - ... def tweet(self, auth, message): - ... twitter = Twitter(oauth=auth) - ... try: - ... twitter.post_status_update(message) - ... except twitter.FailWhale as exc: - ... # Retry in 5 minutes. - ... 
raise self.retry(countdown=60 * 5, exc=exc) - - Although the task will never return above as `retry` raises an - exception to notify the worker, we use `raise` in front of the retry - to convey that the rest of the block will not be executed. - - """ - request = self.request - retries = request.retries + 1 - max_retries = self.max_retries if max_retries is None else max_retries - - # Not in worker or emulated by (apply/always_eager), - # so just raise the original exception. - if request.called_directly: - maybe_reraise() # raise orig stack if PyErr_Occurred - raise exc or Retry('Task can be retried', None) - - if not eta and countdown is None: - countdown = self.default_retry_delay - - is_eager = request.is_eager - S = self.subtask_from_request( - request, args, kwargs, - countdown=countdown, eta=eta, retries=retries, - **options - ) - - if max_retries is not None and retries > max_retries: - if exc: - # first try to reraise the original exception - maybe_reraise() - # or if not in an except block then raise the custom exc. - raise exc - raise self.MaxRetriesExceededError( - "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( - self.name, request.id, S.args, S.kwargs)) - - ret = Retry(exc=exc, when=eta or countdown) - - if is_eager: - # if task was executed eagerly using apply(), - # then the retry must also be executed eagerly. - S.apply().get() - return ret - - try: - S.apply_async() - except Exception as exc: - raise Reject(exc, requeue=False) - if throw: - raise ret - return ret - - def apply(self, args=None, kwargs=None, - link=None, link_error=None, **options): - """Execute this task locally, by blocking until the task returns. - - :param args: positional arguments passed on to the task. - :param kwargs: keyword arguments passed on to the task. - :keyword throw: Re-raise task exceptions. Defaults to - the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` - setting. - - :rtype :class:`celery.result.EagerResult`: - - """ - # trace imports Task, so need to import inline. - from celery.app.trace import eager_trace_task - - app = self._get_app() - args = args or () - # add 'self' if this is a bound method. - if self.__self__ is not None: - args = (self.__self__, ) + tuple(args) - kwargs = kwargs or {} - task_id = options.get('task_id') or uuid() - retries = options.get('retries', 0) - throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', - options.pop('throw', None)) - - # Make sure we get the task instance, not class. 
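# ----------------------------------------------------------------------
# apply(), whose body continues below, runs the task inline and returns
# an EagerResult; with CELERY_ALWAYS_EAGER set, apply_async() is
# rerouted to apply() as its docstring notes -- useful in tests.  A
# sketch, reusing the hypothetical `app` and `add`:

app.conf.CELERY_ALWAYS_EAGER = True
app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True   # re-raise task errors

assert add.apply((2, 2)).get() == 4    # executed in this process
assert add.delay(2, 2).get() == 4      # rerouted to apply() by ALWAYS_EAGER
# ----------------------------------------------------------------------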
- task = app._tasks[self.name] - - request = {'id': task_id, - 'retries': retries, - 'is_eager': True, - 'logfile': options.get('logfile'), - 'loglevel': options.get('loglevel', 0), - 'callbacks': maybe_list(link), - 'errbacks': maybe_list(link_error), - 'headers': options.get('headers'), - 'delivery_info': {'is_eager': True}} - if self.accept_magic_kwargs: - default_kwargs = {'task_name': task.name, - 'task_id': task_id, - 'task_retries': retries, - 'task_is_eager': True, - 'logfile': options.get('logfile'), - 'loglevel': options.get('loglevel', 0), - 'delivery_info': {'is_eager': True}} - supported_keys = fun_takes_kwargs(task.run, default_kwargs) - extend_with = dict((key, val) - for key, val in items(default_kwargs) - if key in supported_keys) - kwargs.update(extend_with) - - tb = None - retval, info = eager_trace_task(task, task_id, args, kwargs, - app=self._get_app(), - request=request, propagate=throw) - if isinstance(retval, ExceptionInfo): - retval, tb = retval.exception, retval.traceback - state = states.SUCCESS if info is None else info.state - return EagerResult(task_id, retval, state, traceback=tb) - - def AsyncResult(self, task_id, **kwargs): - """Get AsyncResult instance for this kind of task. - - :param task_id: Task id to get result for. - - """ - return self._get_app().AsyncResult(task_id, backend=self.backend, - task_name=self.name, **kwargs) - - def subtask(self, args=None, *starargs, **starkwargs): - """Return :class:`~celery.signature` object for - this task, wrapping arguments and execution options - for a single task invocation.""" - starkwargs.setdefault('app', self.app) - return signature(self, args, *starargs, **starkwargs) - - def s(self, *args, **kwargs): - """``.s(*a, **k) -> .subtask(a, k)``""" - return self.subtask(args, kwargs) - - def si(self, *args, **kwargs): - """``.si(*a, **k) -> .subtask(a, k, immutable=True)``""" - return self.subtask(args, kwargs, immutable=True) - - def chunks(self, it, n): - """Creates a :class:`~celery.canvas.chunks` task for this task.""" - from celery import chunks - return chunks(self.s(), it, n, app=self.app) - - def map(self, it): - """Creates a :class:`~celery.canvas.xmap` task from ``it``.""" - from celery import xmap - return xmap(self.s(), it, app=self.app) - - def starmap(self, it): - """Creates a :class:`~celery.canvas.xstarmap` task from ``it``.""" - from celery import xstarmap - return xstarmap(self.s(), it, app=self.app) - - def send_event(self, type_, **fields): - req = self.request - with self.app.events.default_dispatcher(hostname=req.hostname) as d: - return d.send(type_, uuid=req.id, **fields) - - def update_state(self, task_id=None, state=None, meta=None): - """Update task state. - - :keyword task_id: Id of the task to update, defaults to the - id of the current task - :keyword state: New state (:class:`str`). - :keyword meta: State metadata (:class:`dict`). - - - - """ - if task_id is None: - task_id = self.request.id - self.backend.store_result(task_id, meta, state) - - def on_success(self, retval, task_id, args, kwargs): - """Success handler. - - Run by the worker if the task executes successfully. - - :param retval: The return value of the task. - :param task_id: Unique id of the executed task. - :param args: Original arguments for the executed task. - :param kwargs: Original keyword arguments for the executed task. - - The return value of this handler is ignored. - - """ - pass - - def on_retry(self, exc, task_id, args, kwargs, einfo): - """Retry handler. 
- - This is run by the worker when the task is to be retried. - - :param exc: The exception sent to :meth:`retry`. - :param task_id: Unique id of the retried task. - :param args: Original arguments for the retried task. - :param kwargs: Original keyword arguments for the retried task. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback. - - The return value of this handler is ignored. - - """ - pass - - def on_failure(self, exc, task_id, args, kwargs, einfo): - """Error handler. - - This is run by the worker when the task fails. - - :param exc: The exception raised by the task. - :param task_id: Unique id of the failed task. - :param args: Original arguments for the task that failed. - :param kwargs: Original keyword arguments for the task - that failed. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback. - - The return value of this handler is ignored. - - """ - pass - - def after_return(self, status, retval, task_id, args, kwargs, einfo): - """Handler called after the task returns. - - :param status: Current task state. - :param retval: Task return value/exception. - :param task_id: Unique id of the task. - :param args: Original arguments for the task. - :param kwargs: Original keyword arguments for the task. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback (if any). - - The return value of this handler is ignored. - - """ - pass - - def send_error_email(self, context, exc, **kwargs): - if self.send_error_emails and \ - not getattr(self, 'disable_error_emails', None): - self.ErrorMail(self, **kwargs).send(context, exc) - - def add_trail(self, result): - if self.trail: - self.request.children.append(result) - return result - - def push_request(self, *args, **kwargs): - self.request_stack.push(Context(*args, **kwargs)) - - def pop_request(self): - self.request_stack.pop() - - def __repr__(self): - """`repr(task)`""" - return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE) - - def _get_request(self): - """Get current request object.""" - req = self.request_stack.top - if req is None: - # task was not called, but some may still expect a request - # to be there, perhaps that should be deprecated. - if self._default_request is None: - self._default_request = Context() - return self._default_request - return req - request = property(_get_request) - - def _get_exec_options(self): - if self._exec_options is None: - self._exec_options = extract_exec_options(self) - return self._exec_options - - @property - def backend(self): - backend = self._backend - if backend is None: - return self.app.backend - return backend - - @backend.setter - def backend(self, value): # noqa - self._backend = value - - @property - def __name__(self): - return self.__class__.__name__ -BaseTask = Task # compat alias diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/trace.py b/thesisenv/lib/python3.6/site-packages/celery/app/trace.py deleted file mode 100644 index feea0e8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/trace.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.trace - ~~~~~~~~~~~~~~~~ - - This module defines how the task execution is traced: - errors are recorded, handlers are applied and so on. - -""" -from __future__ import absolute_import - -# ## --- -# This is the heart of the worker, the inner loop so to speak. 
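# ----------------------------------------------------------------------
# The on_success/on_retry/on_failure/after_return hooks documented above
# are usually overridden on an abstract base class and attached through
# the decorator's base= option.  A hedged sketch, reusing the
# hypothetical `app`:

class Audited(app.Task):
    abstract = True                     # base class only, not registered

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        print('task {0}[{1}] failed: {2!r}'.format(self.name, task_id, exc))

@app.task(base=Audited, bind=True)
def risky(self, n):
    if n < 0:
        raise ValueError(n)
    self.update_state(state='PROGRESS', meta={'n': n})   # custom task state
    return n
# ----------------------------------------------------------------------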
-# It used to be split up into nice little classes and methods, -# but in the end it only resulted in bad performance and horrible tracebacks, -# so instead we now use one closure per task class. - -import os -import socket -import sys - -from warnings import warn - -from billiard.einfo import ExceptionInfo -from kombu.exceptions import EncodeError -from kombu.utils import kwdict - -from celery import current_app, group -from celery import states, signals -from celery._state import _task_stack -from celery.app import set_default_app -from celery.app.task import Task as BaseTask, Context -from celery.exceptions import Ignore, Reject, Retry -from celery.utils.log import get_logger -from celery.utils.objects import mro_lookup -from celery.utils.serialization import ( - get_pickleable_exception, - get_pickleable_etype, -) - -__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task', - 'setup_worker_optimizations', 'reset_worker_optimizations'] - -_logger = get_logger(__name__) - -send_prerun = signals.task_prerun.send -send_postrun = signals.task_postrun.send -send_success = signals.task_success.send -STARTED = states.STARTED -SUCCESS = states.SUCCESS -IGNORED = states.IGNORED -REJECTED = states.REJECTED -RETRY = states.RETRY -FAILURE = states.FAILURE -EXCEPTION_STATES = states.EXCEPTION_STATES -IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED]) - -#: set by :func:`setup_worker_optimizations` -_tasks = None -_patched = {} - - -def task_has_custom(task, attr): - """Return true if the task or one of its bases - defines ``attr`` (excluding the one in BaseTask).""" - return mro_lookup(task.__class__, attr, stop=(BaseTask, object), - monkey_patched=['celery.app.task']) - - -class TraceInfo(object): - __slots__ = ('state', 'retval') - - def __init__(self, state, retval=None): - self.state = state - self.retval = retval - - def handle_error_state(self, task, eager=False): - store_errors = not eager - if task.ignore_result: - store_errors = task.store_errors_even_if_ignored - - return { - RETRY: self.handle_retry, - FAILURE: self.handle_failure, - }[self.state](task, store_errors=store_errors) - - def handle_retry(self, task, store_errors=True): - """Handle retry exception.""" - # the exception raised is the Retry semi-predicate, - # and it's exc' attribute is the original exception raised (if any). 
- req = task.request - type_, _, tb = sys.exc_info() - try: - reason = self.retval - einfo = ExceptionInfo((type_, reason, tb)) - if store_errors: - task.backend.mark_as_retry( - req.id, reason.exc, einfo.traceback, request=req, - ) - task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) - signals.task_retry.send(sender=task, request=req, - reason=reason, einfo=einfo) - return einfo - finally: - del(tb) - - def handle_failure(self, task, store_errors=True): - """Handle exception.""" - req = task.request - type_, _, tb = sys.exc_info() - try: - exc = self.retval - einfo = ExceptionInfo() - einfo.exception = get_pickleable_exception(einfo.exception) - einfo.type = get_pickleable_etype(einfo.type) - if store_errors: - task.backend.mark_as_failure( - req.id, exc, einfo.traceback, request=req, - ) - task.on_failure(exc, req.id, req.args, req.kwargs, einfo) - signals.task_failure.send(sender=task, task_id=req.id, - exception=exc, args=req.args, - kwargs=req.kwargs, - traceback=tb, - einfo=einfo) - return einfo - finally: - del(tb) - - -def build_tracer(name, task, loader=None, hostname=None, store_errors=True, - Info=TraceInfo, eager=False, propagate=False, app=None, - IGNORE_STATES=IGNORE_STATES): - """Return a function that traces task execution; catches all - exceptions and updates result backend with the state and result - - If the call was successful, it saves the result to the task result - backend, and sets the task status to `"SUCCESS"`. - - If the call raises :exc:`~@Retry`, it extracts - the original exception, uses that as the result and sets the task state - to `"RETRY"`. - - If the call results in an exception, it saves the exception as the task - result, and sets the task state to `"FAILURE"`. - - Return a function that takes the following arguments: - - :param uuid: The id of the task. - :param args: List of positional args to pass on to the function. - :param kwargs: Keyword arguments mapping to pass on to the function. - :keyword request: Request dict. - - """ - # If the task doesn't define a custom __call__ method - # we optimize it away by simply calling the run method directly, - # saving the extra method call and a line less in the stack trace. 
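# ----------------------------------------------------------------------
# handle_retry/handle_failure above emit the task_retry and task_failure
# signals with the keyword arguments shown in their send() calls;
# receivers can be attached like this (the print targets are
# placeholders):

from celery.signals import task_failure, task_retry

@task_failure.connect
def log_task_failure(sender=None, task_id=None, exception=None, **kwargs):
    print('{0}[{1}] raised {2!r}'.format(sender.name, task_id, exception))

@task_retry.connect
def log_task_retry(sender=None, request=None, reason=None, **kwargs):
    print('{0}[{1}] retrying: {2!r}'.format(sender.name, request.id, reason))
# ----------------------------------------------------------------------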
- fun = task if task_has_custom(task, '__call__') else task.run - - loader = loader or app.loader - backend = task.backend - ignore_result = task.ignore_result - track_started = task.track_started - track_started = not eager and (task.track_started and not ignore_result) - publish_result = not eager and not ignore_result - hostname = hostname or socket.gethostname() - - loader_task_init = loader.on_task_init - loader_cleanup = loader.on_process_cleanup - - task_on_success = None - task_after_return = None - if task_has_custom(task, 'on_success'): - task_on_success = task.on_success - if task_has_custom(task, 'after_return'): - task_after_return = task.after_return - - store_result = backend.store_result - backend_cleanup = backend.process_cleanup - - pid = os.getpid() - - request_stack = task.request_stack - push_request = request_stack.push - pop_request = request_stack.pop - push_task = _task_stack.push - pop_task = _task_stack.pop - on_chord_part_return = backend.on_chord_part_return - - prerun_receivers = signals.task_prerun.receivers - postrun_receivers = signals.task_postrun.receivers - success_receivers = signals.task_success.receivers - - from celery import canvas - signature = canvas.maybe_signature # maybe_ does not clone if already - - def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True): - if propagate: - raise - I = Info(state, exc) - R = I.handle_error_state(task, eager=eager) - if call_errbacks: - group( - [signature(errback, app=app) - for errback in request.errbacks or []], app=app, - ).apply_async((uuid, )) - return I, R, I.state, I.retval - - def trace_task(uuid, args, kwargs, request=None): - # R - is the possibly prepared return value. - # I - is the Info object. - # retval - is the always unmodified return value. - # state - is the resulting task state. - - # This function is very long because we have unrolled all the calls - # for performance reasons, and because the function is so long - # we want the main variables (I, and R) to stand out visually from the - # the rest of the variables, so breaking PEP8 is worth it ;) - R = I = retval = state = None - kwargs = kwdict(kwargs) - try: - push_task(task) - task_request = Context(request or {}, args=args, - called_directly=False, kwargs=kwargs) - push_request(task_request) - try: - # -*- PRE -*- - if prerun_receivers: - send_prerun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs) - loader_task_init(uuid, task) - if track_started: - store_result( - uuid, {'pid': pid, 'hostname': hostname}, STARTED, - request=task_request, - ) - - # -*- TRACE -*- - try: - R = retval = fun(*args, **kwargs) - state = SUCCESS - except Reject as exc: - I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) - state, retval = I.state, I.retval - except Ignore as exc: - I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) - state, retval = I.state, I.retval - except Retry as exc: - I, R, state, retval = on_error( - task_request, exc, uuid, RETRY, call_errbacks=False, - ) - except Exception as exc: - I, R, state, retval = on_error(task_request, exc, uuid) - except BaseException as exc: - raise - else: - try: - # callback tasks must be applied before the result is - # stored, so that result.children is populated. 
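# ----------------------------------------------------------------------
# The callback handling that follows applies link= signatures with the
# parent's return value prepended, while on_error above applies
# link_error= signatures with the failing task's id.  A sketch, reusing
# the hypothetical `app` and `add`:

@app.task
def log_result(value):
    print('result was {0!r}'.format(value))

@app.task
def log_error(task_id):
    print('task {0} failed'.format(task_id))

add.apply_async((2, 2), link=log_result.s(), link_error=log_error.s())
# ----------------------------------------------------------------------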
- - # groups are called inline and will store trail - # separately, so need to call them separately - # so that the trail's not added multiple times :( - # (Issue #1936) - callbacks = task.request.callbacks - if callbacks: - if len(task.request.callbacks) > 1: - sigs, groups = [], [] - for sig in callbacks: - sig = signature(sig, app=app) - if isinstance(sig, group): - groups.append(sig) - else: - sigs.append(sig) - for group_ in groups: - group_.apply_async((retval, )) - if sigs: - group(sigs).apply_async((retval, )) - else: - signature(callbacks[0], app=app).delay(retval) - if publish_result: - store_result( - uuid, retval, SUCCESS, request=task_request, - ) - except EncodeError as exc: - I, R, state, retval = on_error(task_request, exc, uuid) - else: - if task_on_success: - task_on_success(retval, uuid, args, kwargs) - if success_receivers: - send_success(sender=task, result=retval) - - # -* POST *- - if state not in IGNORE_STATES: - if task_request.chord: - on_chord_part_return(task, state, R) - if task_after_return: - task_after_return( - state, retval, uuid, args, kwargs, None, - ) - finally: - try: - if postrun_receivers: - send_postrun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs, - retval=retval, state=state) - finally: - pop_task() - pop_request() - if not eager: - try: - backend_cleanup() - loader_cleanup() - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except Exception as exc: - _logger.error('Process cleanup failed: %r', exc, - exc_info=True) - except MemoryError: - raise - except Exception as exc: - if eager: - raise - R = report_internal_error(task, exc) - return R, I - - return trace_task - - -def trace_task(task, uuid, args, kwargs, request={}, **opts): - try: - if task.__trace__ is None: - task.__trace__ = build_tracer(task.name, task, **opts) - return task.__trace__(uuid, args, kwargs, request)[0] - except Exception as exc: - return report_internal_error(task, exc) - - -def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): - app = app or current_app - return trace_task(app.tasks[name], - uuid, args, kwargs, request, app=app, **opts) -trace_task_ret = _trace_task_ret - - -def _fast_trace_task(task, uuid, args, kwargs, request={}): - # setup_worker_optimizations will point trace_task_ret to here, - # so this is the function used in the worker. - return _tasks[task].__trace__(uuid, args, kwargs, request)[0] - - -def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): - opts.setdefault('eager', True) - return build_tracer(task.name, task, **opts)( - uuid, args, kwargs, request) - - -def report_internal_error(task, exc): - _type, _value, _tb = sys.exc_info() - try: - _value = task.backend.prepare_exception(exc, 'pickle') - exc_info = ExceptionInfo((_type, _value, _tb), internal=True) - warn(RuntimeWarning( - 'Exception raised outside body: {0!r}:\n{1}'.format( - exc, exc_info.traceback))) - return exc_info - finally: - del(_tb) - - -def setup_worker_optimizations(app): - global _tasks - global trace_task_ret - - # make sure custom Task.__call__ methods that calls super - # will not mess up the request/task stack. - _install_stack_protection() - - # all new threads start without a current app, so if an app is not - # passed on to the thread it will fall back to the "default app", - # which then could be the wrong app. So for the worker - # we set this to always return our app. This is a hack, - # and means that only a single app can be used for workers - # running in the same process. 
- app.set_current() - set_default_app(app) - - # evaluate all task classes by finalizing the app. - app.finalize() - - # set fast shortcut to task registry - _tasks = app._tasks - - trace_task_ret = _fast_trace_task - from celery.worker import job as job_module - job_module.trace_task_ret = _fast_trace_task - job_module.__optimize__() - - -def reset_worker_optimizations(): - global trace_task_ret - trace_task_ret = _trace_task_ret - try: - delattr(BaseTask, '_stackprotected') - except AttributeError: - pass - try: - BaseTask.__call__ = _patched.pop('BaseTask.__call__') - except KeyError: - pass - from celery.worker import job as job_module - job_module.trace_task_ret = _trace_task_ret - - -def _install_stack_protection(): - # Patches BaseTask.__call__ in the worker to handle the edge case - # where people override it and also call super. - # - # - The worker optimizes away BaseTask.__call__ and instead - # calls task.run directly. - # - so with the addition of current_task and the request stack - # BaseTask.__call__ now pushes to those stacks so that - # they work when tasks are called directly. - # - # The worker only optimizes away __call__ in the case - # where it has not been overridden, so the request/task stack - # will blow if a custom task class defines __call__ and also - # calls super(). - if not getattr(BaseTask, '_stackprotected', False): - _patched['BaseTask.__call__'] = orig = BaseTask.__call__ - - def __protected_call__(self, *args, **kwargs): - stack = self.request_stack - req = stack.top - if req and not req._protected and \ - len(stack) == 1 and not req.called_directly: - req._protected = 1 - return self.run(*args, **kwargs) - return orig(self, *args, **kwargs) - BaseTask.__call__ = __protected_call__ - BaseTask._stackprotected = True diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/utils.py b/thesisenv/lib/python3.6/site-packages/celery/app/utils.py deleted file mode 100644 index b76290b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/utils.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.utils - ~~~~~~~~~~~~~~~~ - - App utilities: Compat settings, bugreport tool, pickling apps. - -""" -from __future__ import absolute_import - -import os -import platform as _platform -import re - -from collections import Mapping -from types import ModuleType - -from kombu.utils.url import maybe_sanitize_url - -from celery.datastructures import ConfigurationView -from celery.five import items, string_t, values -from celery.platforms import pyimplementation -from celery.utils.text import pretty -from celery.utils.imports import import_from_cwd, symbol_by_name, qualname - -from .defaults import find - -__all__ = ['Settings', 'appstr', 'bugreport', - 'filter_hidden_settings', 'find_app'] - -#: Format used to generate bugreport information. -BUGREPORT_INFO = """ -software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} - billiard:{billiard_v} {driver_v} -platform -> system:{system} arch:{arch} imp:{py_i} -loader -> {loader} -settings -> transport:{transport} results:{results} - -{human_settings} -""" - -HIDDEN_SETTINGS = re.compile( - 'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE', - re.IGNORECASE, -) - - -def appstr(app): - """String used in __repr__ etc, to id app instances.""" - return '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) - - -class Settings(ConfigurationView): - """Celery settings object. - - .. seealso: - - :ref:`configuration` for a full list of configuration keys. 
-
-    """
-
-    @property
-    def CELERY_RESULT_BACKEND(self):
-        return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')
-
-    @property
-    def BROKER_TRANSPORT(self):
-        return self.first('BROKER_TRANSPORT',
-                          'BROKER_BACKEND', 'CARROT_BACKEND')
-
-    @property
-    def BROKER_BACKEND(self):
-        """Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
-        return self.BROKER_TRANSPORT
-
-    @property
-    def BROKER_URL(self):
-        return (os.environ.get('CELERY_BROKER_URL') or
-                self.first('BROKER_URL', 'BROKER_HOST'))
-
-    @property
-    def CELERY_TIMEZONE(self):
-        # this way we also support django's time zone.
-        return self.first('CELERY_TIMEZONE', 'TIME_ZONE')
-
-    def without_defaults(self):
-        """Return the current configuration, but without defaults."""
-        # the last stash is the default settings, so just skip that
-        return Settings({}, self._order[:-1])
-
-    def value_set_for(self, key):
-        return key in self.without_defaults()
-
-    def find_option(self, name, namespace='celery'):
-        """Search for option by name.
-
-        Will return ``(namespace, key, type)`` tuple, e.g.::
-
-            >>> from proj.celery import app
-            >>> app.conf.find_option('disable_rate_limits')
-            ('CELERY', 'DISABLE_RATE_LIMITS',
-             <Option: type->bool default->False>))
-
-        :param name: Name of option, cannot be partial.
-        :keyword namespace: Preferred namespace (``CELERY`` by default).
-
-        """
-        return find(name, namespace)
-
-    def find_value_for_key(self, name, namespace='celery'):
-        """Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
-        return self.get_by_parts(*self.find_option(name, namespace)[:-1])
-
-    def get_by_parts(self, *parts):
-        """Return the current value for setting specified as a path.
-
-        Example::
-
-            >>> from proj.celery import app
-            >>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS')
-            False
-
-        """
-        return self['_'.join(part for part in parts if part)]
-
-    def table(self, with_defaults=False, censored=True):
-        filt = filter_hidden_settings if censored else lambda v: v
-        return filt(dict(
-            (k, v) for k, v in items(
-                self if with_defaults else self.without_defaults())
-            if k.isupper() and not k.startswith('_')
-        ))
-
-    def humanize(self, with_defaults=False, censored=True):
-        """Return a human readable string showing changes to the
-        configuration."""
-        return '\n'.join(
-            '{0}: {1}'.format(key, pretty(value, width=50))
-            for key, value in items(self.table(with_defaults, censored)))
-
-
-class AppPickler(object):
-    """Old application pickler/unpickler (< 3.1)."""
-
-    def __call__(self, cls, *args):
-        kwargs = self.build_kwargs(*args)
-        app = self.construct(cls, **kwargs)
-        self.prepare(app, **kwargs)
-        return app
-
-    def prepare(self, app, **kwargs):
-        app.conf.update(kwargs['changes'])
-
-    def build_kwargs(self, *args):
-        return self.build_standard_kwargs(*args)
-
-    def build_standard_kwargs(self, main, changes, loader, backend, amqp,
-                              events, log, control, accept_magic_kwargs,
-                              config_source=None):
-        return dict(main=main, loader=loader, backend=backend, amqp=amqp,
-                    changes=changes, events=events, log=log, control=control,
-                    set_as_current=False,
-                    accept_magic_kwargs=accept_magic_kwargs,
-                    config_source=config_source)
-
-    def construct(self, cls, **kwargs):
-        return cls(**kwargs)
-
-
-def _unpickle_app(cls, pickler, *args):
-    """Rebuild app for versions 2.5+"""
-    return pickler()(cls, *args)
-
-
-def _unpickle_app_v2(cls, kwargs):
-    """Rebuild app for versions 3.1+"""
-    kwargs['set_as_current'] = False
-    return cls(**kwargs)
-
-
-def filter_hidden_settings(conf):
-
-    def maybe_censor(key, value, mask='*' * 8):
-        if
isinstance(value, Mapping): - return filter_hidden_settings(value) - if isinstance(key, string_t): - if HIDDEN_SETTINGS.search(key): - return mask - elif 'BROKER_URL' in key.upper(): - from kombu import Connection - return Connection(value).as_uri(mask=mask) - elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'): - return maybe_sanitize_url(value, mask=mask) - - return value - - return dict((k, maybe_censor(k, v)) for k, v in items(conf)) - - -def bugreport(app): - """Return a string containing information useful in bug reports.""" - import billiard - import celery - import kombu - - try: - conn = app.connection() - driver_v = '{0}:{1}'.format(conn.transport.driver_name, - conn.transport.driver_version()) - transport = conn.transport_cls - except Exception: - transport = driver_v = '' - - return BUGREPORT_INFO.format( - system=_platform.system(), - arch=', '.join(x for x in _platform.architecture() if x), - py_i=pyimplementation(), - celery_v=celery.VERSION_BANNER, - kombu_v=kombu.__version__, - billiard_v=billiard.__version__, - py_v=_platform.python_version(), - driver_v=driver_v, - transport=transport, - results=maybe_sanitize_url( - app.conf.CELERY_RESULT_BACKEND or 'disabled'), - human_settings=app.conf.humanize(), - loader=qualname(app.loader.__class__), - ) - - -def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd): - from .base import Celery - - try: - sym = symbol_by_name(app, imp=imp) - except AttributeError: - # last part was not an attribute, but a module - sym = imp(app) - if isinstance(sym, ModuleType) and ':' not in app: - try: - found = sym.app - if isinstance(found, ModuleType): - raise AttributeError() - except AttributeError: - try: - found = sym.celery - if isinstance(found, ModuleType): - raise AttributeError() - except AttributeError: - if getattr(sym, '__path__', None): - try: - return find_app( - '{0}.celery'.format(app), - symbol_by_name=symbol_by_name, imp=imp, - ) - except ImportError: - pass - for suspect in values(vars(sym)): - if isinstance(suspect, Celery): - return suspect - raise - else: - return found - else: - return found - return sym diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py b/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py deleted file mode 100644 index 46cef9b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.apps.beat - ~~~~~~~~~~~~~~~~ - - This module is the 'program-version' of :mod:`celery.beat`. - - It does everything necessary to run that module - as an actual application, like installing signal handlers - and so on. - -""" -from __future__ import absolute_import, unicode_literals - -import numbers -import socket -import sys - -from celery import VERSION_BANNER, platforms, beat -from celery.utils.imports import qualname -from celery.utils.log import LOG_LEVELS, get_logger -from celery.utils.timeutils import humanize_seconds - -__all__ = ['Beat'] - -STARTUP_INFO_FMT = """ -Configuration -> - . broker -> {conninfo} - . loader -> {loader} - . scheduler -> {scheduler} -{scheduler_info} - . logfile -> {logfile}@%{loglevel} - . 
maxinterval -> {hmax_interval} ({max_interval}s) -""".strip() - -logger = get_logger('celery.beat') - - -class Beat(object): - Service = beat.Service - app = None - - def __init__(self, max_interval=None, app=None, - socket_timeout=30, pidfile=None, no_color=None, - loglevel=None, logfile=None, schedule=None, - scheduler_cls=None, redirect_stdouts=None, - redirect_stdouts_level=None, **kwargs): - """Starts the beat task scheduler.""" - self.app = app = app or self.app - self.loglevel = self._getopt('log_level', loglevel) - self.logfile = self._getopt('log_file', logfile) - self.schedule = self._getopt('schedule_filename', schedule) - self.scheduler_cls = self._getopt('scheduler', scheduler_cls) - self.redirect_stdouts = self._getopt( - 'redirect_stdouts', redirect_stdouts, - ) - self.redirect_stdouts_level = self._getopt( - 'redirect_stdouts_level', redirect_stdouts_level, - ) - - self.max_interval = max_interval - self.socket_timeout = socket_timeout - self.no_color = no_color - self.colored = app.log.colored( - self.logfile, - enabled=not no_color if no_color is not None else no_color, - ) - self.pidfile = pidfile - - if not isinstance(self.loglevel, numbers.Integral): - self.loglevel = LOG_LEVELS[self.loglevel.upper()] - - def _getopt(self, key, value): - if value is not None: - return value - return self.app.conf.find_value_for_key(key, namespace='celerybeat') - - def run(self): - print(str(self.colored.cyan( - 'celery beat v{0} is starting.'.format(VERSION_BANNER)))) - self.init_loader() - self.set_process_title() - self.start_scheduler() - - def setup_logging(self, colorize=None): - if colorize is None and self.no_color is not None: - colorize = not self.no_color - self.app.log.setup(self.loglevel, self.logfile, - self.redirect_stdouts, self.redirect_stdouts_level, - colorize=colorize) - - def start_scheduler(self): - c = self.colored - if self.pidfile: - platforms.create_pidlock(self.pidfile) - beat = self.Service(app=self.app, - max_interval=self.max_interval, - scheduler_cls=self.scheduler_cls, - schedule_filename=self.schedule) - - print(str(c.blue('__ ', c.magenta('-'), - c.blue(' ... __ '), c.magenta('-'), - c.blue(' _\n'), - c.reset(self.startup_info(beat))))) - self.setup_logging() - if self.socket_timeout: - logger.debug('Setting default socket timeout to %r', - self.socket_timeout) - socket.setdefaulttimeout(self.socket_timeout) - try: - self.install_sync_handler(beat) - beat.start() - except Exception as exc: - logger.critical('beat raised exception %s: %r', - exc.__class__, exc, - exc_info=True) - - def init_loader(self): - # Run the worker init handler. - # (Usually imports task modules and such.) 
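# ----------------------------------------------------------------------
# The Beat service started above reads its entries from the
# CELERYBEAT_SCHEDULE setting (or from the scheduler class given with
# -S).  A minimal schedule, reusing the hypothetical 'proj.add' task
# from the earlier notes:

from datetime import timedelta

app.conf.CELERYBEAT_SCHEDULE = {
    'add-every-30-seconds': {
        'task': 'proj.add',
        'schedule': timedelta(seconds=30),
        'args': (2, 2),
    },
}
# started with:  celery -A proj beat
# ----------------------------------------------------------------------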
- self.app.loader.init_worker() - self.app.finalize() - - def startup_info(self, beat): - scheduler = beat.get_scheduler(lazy=True) - return STARTUP_INFO_FMT.format( - conninfo=self.app.connection().as_uri(), - logfile=self.logfile or '[stderr]', - loglevel=LOG_LEVELS[self.loglevel], - loader=qualname(self.app.loader), - scheduler=qualname(scheduler), - scheduler_info=scheduler.info, - hmax_interval=humanize_seconds(beat.max_interval), - max_interval=beat.max_interval, - ) - - def set_process_title(self): - arg_start = 'manage' in sys.argv[0] and 2 or 1 - platforms.set_process_title( - 'celery beat', info=' '.join(sys.argv[arg_start:]), - ) - - def install_sync_handler(self, beat): - """Install a `SIGTERM` + `SIGINT` handler that saves - the beat schedule.""" - - def _sync(signum, frame): - beat.sync() - raise SystemExit() - - platforms.signals.update(SIGTERM=_sync, SIGINT=_sync) diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py b/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py deleted file mode 100644 index 637a082..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.apps.worker - ~~~~~~~~~~~~~~~~~~ - - This module is the 'program-version' of :mod:`celery.worker`. - - It does everything necessary to run that module - as an actual application, like installing signal handlers, - platform tweaks, and so on. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import logging -import os -import platform as _platform -import sys -import warnings - -from functools import partial - -from billiard import current_process -from kombu.utils.encoding import safe_str - -from celery import VERSION_BANNER, platforms, signals -from celery.app import trace -from celery.exceptions import ( - CDeprecationWarning, WorkerShutdown, WorkerTerminate, -) -from celery.five import string, string_t -from celery.loaders.app import AppLoader -from celery.platforms import check_privileges -from celery.utils import cry, isatty -from celery.utils.imports import qualname -from celery.utils.log import get_logger, in_sighandler, set_in_sighandler -from celery.utils.text import pluralize -from celery.worker import WorkController - -__all__ = ['Worker'] - -logger = get_logger(__name__) -is_jython = sys.platform.startswith('java') -is_pypy = hasattr(sys, 'pypy_version_info') - -W_PICKLE_DEPRECATED = """ -Starting from version 3.2 Celery will refuse to accept pickle by default. - -The pickle serializer is a security concern as it may give attackers -the ability to execute any command. It's important to secure -your broker from unauthorized access when using pickle, so we think -that enabling pickle should require a deliberate action and not be -the default choice. - -If you depend on pickle then you should set a setting to disable this -warning and to be sure that everything will continue working -when you upgrade to Celery 3.2:: - - CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] - -You must only enable the serializers that you will actually use. 
- -""" - - -def active_thread_count(): - from threading import enumerate - return sum(1 for t in enumerate() - if not t.name.startswith('Dummy-')) - - -def safe_say(msg): - print('\n{0}'.format(msg), file=sys.__stderr__) - -ARTLINES = [ - ' --------------', - '---- **** -----', - '--- * *** * --', - '-- * - **** ---', - '- ** ----------', - '- ** ----------', - '- ** ----------', - '- ** ----------', - '- *** --- * ---', - '-- ******* ----', - '--- ***** -----', - ' --------------', -] - -BANNER = """\ -{hostname} v{version} - -{platform} - -[config] -.> app: {app} -.> transport: {conninfo} -.> results: {results} -.> concurrency: {concurrency} - -[queues] -{queues} -""" - -EXTRA_INFO_FMT = """ -[tasks] -{tasks} -""" - - -class Worker(WorkController): - - def on_before_init(self, **kwargs): - trace.setup_worker_optimizations(self.app) - - # this signal can be used to set up configuration for - # workers by name. - signals.celeryd_init.send( - sender=self.hostname, instance=self, - conf=self.app.conf, options=kwargs, - ) - check_privileges(self.app.conf.CELERY_ACCEPT_CONTENT) - - def on_after_init(self, purge=False, no_color=None, - redirect_stdouts=None, redirect_stdouts_level=None, - **kwargs): - self.redirect_stdouts = self._getopt( - 'redirect_stdouts', redirect_stdouts, - ) - self.redirect_stdouts_level = self._getopt( - 'redirect_stdouts_level', redirect_stdouts_level, - ) - super(Worker, self).setup_defaults(**kwargs) - self.purge = purge - self.no_color = no_color - self._isatty = isatty(sys.stdout) - self.colored = self.app.log.colored( - self.logfile, - enabled=not no_color if no_color is not None else no_color - ) - - def on_init_blueprint(self): - self._custom_logging = self.setup_logging() - # apply task execution optimizations - # -- This will finalize the app! - trace.setup_worker_optimizations(self.app) - - def on_start(self): - if not self._custom_logging and self.redirect_stdouts: - self.app.log.redirect_stdouts(self.redirect_stdouts_level) - - WorkController.on_start(self) - - # this signal can be used to e.g. change queues after - # the -Q option has been applied. - signals.celeryd_after_setup.send( - sender=self.hostname, instance=self, conf=self.app.conf, - ) - - if not self.app.conf.value_set_for('CELERY_ACCEPT_CONTENT'): - warnings.warn(CDeprecationWarning(W_PICKLE_DEPRECATED)) - - if self.purge: - self.purge_messages() - - # Dump configuration to screen so we have some basic information - # for when users sends bug reports. - print(safe_str(''.join([ - string(self.colored.cyan(' \n', self.startup_info())), - string(self.colored.reset(self.extra_info() or '')), - ])), file=sys.__stdout__) - self.set_process_status('-active-') - self.install_platform_tweaks(self) - - def on_consumer_ready(self, consumer): - signals.worker_ready.send(sender=consumer) - print('{0} ready.'.format(safe_str(self.hostname), )) - - def setup_logging(self, colorize=None): - if colorize is None and self.no_color is not None: - colorize = not self.no_color - return self.app.log.setup( - self.loglevel, self.logfile, - redirect_stdouts=False, colorize=colorize, hostname=self.hostname, - ) - - def purge_messages(self): - count = self.app.control.purge() - if count: - print('purge: Erased {0} {1} from the queue.\n'.format( - count, pluralize(count, 'message'))) - - def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): - return sep.join( - ' . 
{0}'.format(task) for task in sorted(self.app.tasks) - if (not task.startswith(int_) if not include_builtins else task) - ) - - def extra_info(self): - if self.loglevel <= logging.INFO: - include_builtins = self.loglevel <= logging.DEBUG - tasklist = self.tasklist(include_builtins=include_builtins) - return EXTRA_INFO_FMT.format(tasks=tasklist) - - def startup_info(self): - app = self.app - concurrency = string(self.concurrency) - appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) - if not isinstance(app.loader, AppLoader): - loader = qualname(app.loader) - if loader.startswith('celery.loaders'): - loader = loader[14:] - appr += ' ({0})'.format(loader) - if self.autoscale: - max, min = self.autoscale - concurrency = '{{min={0}, max={1}}}'.format(min, max) - pool = self.pool_cls - if not isinstance(pool, string_t): - pool = pool.__module__ - concurrency += ' ({0})'.format(pool.split('.')[-1]) - events = 'ON' - if not self.send_events: - events = 'OFF (enable -E to monitor this worker)' - - banner = BANNER.format( - app=appr, - hostname=safe_str(self.hostname), - version=VERSION_BANNER, - conninfo=self.app.connection().as_uri(), - results=self.app.backend.as_uri(), - concurrency=concurrency, - platform=safe_str(_platform.platform()), - events=events, - queues=app.amqp.queues.format(indent=0, indent_first=False), - ).splitlines() - - # integrate the ASCII art. - for i, x in enumerate(banner): - try: - banner[i] = ' '.join([ARTLINES[i], banner[i]]) - except IndexError: - banner[i] = ' ' * 16 + banner[i] - return '\n'.join(banner) + '\n' - - def install_platform_tweaks(self, worker): - """Install platform specific tweaks and workarounds.""" - if self.app.IS_OSX: - self.osx_proxy_detection_workaround() - - # Install signal handler so SIGHUP restarts the worker. - if not self._isatty: - # only install HUP handler if detached from terminal, - # so closing the terminal window doesn't restart the worker - # into the background. - if self.app.IS_OSX: - # OS X can't exec from a process using threads. 
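# ----------------------------------------------------------------------
# install_platform_tweaks above wires the handlers defined just below to
# POSIX signals.  A sketch of driving them from another process; the pid
# is a placeholder:

import os
import signal

worker_pid = 12345                     # hypothetical MainProcess pid

os.kill(worker_pid, signal.SIGTERM)    # warm shutdown (WorkerShutdown)
os.kill(worker_pid, signal.SIGQUIT)    # cold shutdown (WorkerTerminate)
os.kill(worker_pid, signal.SIGUSR1)    # log a traceback of all threads (cry)
os.kill(worker_pid, signal.SIGHUP)     # restart the worker (non-tty only)
# ----------------------------------------------------------------------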
- # See http://github.com/celery/celery/issues#issue/152 - install_HUP_not_supported_handler(worker) - else: - install_worker_restart_handler(worker) - install_worker_term_handler(worker) - install_worker_term_hard_handler(worker) - install_worker_int_handler(worker) - install_cry_handler() - install_rdb_handler() - - def osx_proxy_detection_workaround(self): - """See http://github.com/celery/celery/issues#issue/161""" - os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd') - - def set_process_status(self, info): - return platforms.set_mp_process_title( - 'celeryd', - info='{0} ({1})'.format(info, platforms.strargv(sys.argv)), - hostname=self.hostname, - ) - - -def _shutdown_handler(worker, sig='TERM', how='Warm', - exc=WorkerShutdown, callback=None): - - def _handle_request(*args): - with in_sighandler(): - from celery.worker import state - if current_process()._name == 'MainProcess': - if callback: - callback(worker) - safe_say('worker: {0} shutdown (MainProcess)'.format(how)) - if active_thread_count() > 1: - setattr(state, {'Warm': 'should_stop', - 'Cold': 'should_terminate'}[how], True) - else: - raise exc() - _handle_request.__name__ = str('worker_{0}'.format(how)) - platforms.signals[sig] = _handle_request -install_worker_term_handler = partial( - _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, -) -if not is_jython: # pragma: no cover - install_worker_term_hard_handler = partial( - _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, - ) -else: # pragma: no cover - install_worker_term_handler = \ - install_worker_term_hard_handler = lambda *a, **kw: None - - -def on_SIGINT(worker): - safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') - install_worker_term_hard_handler(worker, sig='SIGINT') -if not is_jython: # pragma: no cover - install_worker_int_handler = partial( - _shutdown_handler, sig='SIGINT', callback=on_SIGINT - ) -else: # pragma: no cover - def install_worker_int_handler(*a, **kw): - pass - - -def _reload_current_worker(): - platforms.close_open_fds([ - sys.__stdin__, sys.__stdout__, sys.__stderr__, - ]) - os.execv(sys.executable, [sys.executable] + sys.argv) - - -def install_worker_restart_handler(worker, sig='SIGHUP'): - - def restart_worker_sig_handler(*args): - """Signal handler restarting the current python program.""" - set_in_sighandler(True) - safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv))) - import atexit - atexit.register(_reload_current_worker) - from celery.worker import state - state.should_stop = True - platforms.signals[sig] = restart_worker_sig_handler - - -def install_cry_handler(sig='SIGUSR1'): - # Jython/PyPy does not have sys._current_frames - if is_jython or is_pypy: # pragma: no cover - return - - def cry_handler(*args): - """Signal handler logging the stacktrace of all active threads.""" - with in_sighandler(): - safe_say(cry()) - platforms.signals[sig] = cry_handler - - -def install_rdb_handler(envvar='CELERY_RDBSIG', - sig='SIGUSR2'): # pragma: no cover - - def rdb_handler(*args): - """Signal handler setting a rdb breakpoint at the current frame.""" - with in_sighandler(): - from celery.contrib.rdb import set_trace, _frame - # gevent does not pass standard signal handler args - frame = args[1] if args else _frame().f_back - set_trace(frame) - if os.environ.get(envvar): - platforms.signals[sig] = rdb_handler - - -def install_HUP_not_supported_handler(worker, sig='SIGHUP'): - - def warn_on_HUP_handler(signum, frame): - with in_sighandler(): - safe_say('{sig} not 
supported: Restarting with {sig} is ' - 'unstable on this platform!'.format(sig=sig)) - platforms.signals[sig] = warn_on_HUP_handler diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py deleted file mode 100644 index 44ee3b7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends - ~~~~~~~~~~~~~~~ - - Backend abstract factory (...did I just say that?) and alias definitions. - -""" -from __future__ import absolute_import - -import sys -import types - -from celery.exceptions import ImproperlyConfigured -from celery.local import Proxy -from celery._state import current_app -from celery.five import reraise -from celery.utils.imports import symbol_by_name - -__all__ = ['get_backend_cls', 'get_backend_by_url'] - -UNKNOWN_BACKEND = """\ -Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})\ -""" - -BACKEND_ALIASES = { - 'amqp': 'celery.backends.amqp:AMQPBackend', - 'rpc': 'celery.backends.rpc.RPCBackend', - 'cache': 'celery.backends.cache:CacheBackend', - 'redis': 'celery.backends.redis:RedisBackend', - 'mongodb': 'celery.backends.mongodb:MongoBackend', - 'db': 'celery.backends.database:DatabaseBackend', - 'database': 'celery.backends.database:DatabaseBackend', - 'cassandra': 'celery.backends.cassandra:CassandraBackend', - 'couchbase': 'celery.backends.couchbase:CouchBaseBackend', - 'disabled': 'celery.backends.base:DisabledBackend', -} - -#: deprecated alias to ``current_app.backend``. -default_backend = Proxy(lambda: current_app.backend) - - -def get_backend_cls(backend=None, loader=None): - """Get backend class by name/alias""" - backend = backend or 'disabled' - loader = loader or current_app.loader - aliases = dict(BACKEND_ALIASES, **loader.override_backends) - try: - cls = symbol_by_name(backend, aliases) - except ValueError as exc: - reraise(ImproperlyConfigured, ImproperlyConfigured( - UNKNOWN_BACKEND.format(backend, exc)), sys.exc_info()[2]) - if isinstance(cls, types.ModuleType): - raise ImproperlyConfigured(UNKNOWN_BACKEND.format( - backend, 'is a Python module, not a backend class.')) - return cls - - -def get_backend_by_url(backend=None, loader=None): - url = None - if backend and '://' in backend: - url = backend - scheme, _, _ = url.partition('://') - if '+' in scheme: - backend, url = url.split('+', 1) - else: - backend = scheme - return get_backend_cls(backend, loader), url diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py deleted file mode 100644 index 6e7f778..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py +++ /dev/null @@ -1,317 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.amqp - ~~~~~~~~~~~~~~~~~~~~ - - The AMQP result backend. - - This backend publishes results as messages. 
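-
-    Editor's illustration (not part of the original module): with the
-    Celery 3.1-era settings used elsewhere in this tree, this backend is
-    selected and tuned via::
-
-        CELERY_RESULT_BACKEND = 'amqp'
-        CELERY_RESULT_PERSISTENT = True  # keep results across broker restarts
-
-    Each task's state is published to a reply queue derived from the
-    task id (see ``rkey()`` below).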
- -""" -from __future__ import absolute_import - -import socket - -from collections import deque -from operator import itemgetter - -from kombu import Exchange, Queue, Producer, Consumer - -from celery import states -from celery.exceptions import TimeoutError -from celery.five import range, monotonic -from celery.utils.functional import dictfilter -from celery.utils.log import get_logger -from celery.utils.timeutils import maybe_s_to_ms - -from .base import BaseBackend - -__all__ = ['BacklogLimitExceeded', 'AMQPBackend'] - -logger = get_logger(__name__) - - -class BacklogLimitExceeded(Exception): - """Too much state history to fast-forward.""" - - -def repair_uuid(s): - # Historically the dashes in UUIDS are removed from AMQ entity names, - # but there is no known reason to. Hopefully we'll be able to fix - # this in v4.0. - return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:]) - - -class NoCacheQueue(Queue): - can_cache_declaration = False - - -class AMQPBackend(BaseBackend): - """Publishes results by sending messages.""" - Exchange = Exchange - Queue = NoCacheQueue - Consumer = Consumer - Producer = Producer - - BacklogLimitExceeded = BacklogLimitExceeded - - persistent = True - supports_autoexpire = True - supports_native_join = True - - retry_policy = { - 'max_retries': 20, - 'interval_start': 0, - 'interval_step': 1, - 'interval_max': 1, - } - - def __init__(self, app, connection=None, exchange=None, exchange_type=None, - persistent=None, serializer=None, auto_delete=True, **kwargs): - super(AMQPBackend, self).__init__(app, **kwargs) - conf = self.app.conf - self._connection = connection - self.persistent = self.prepare_persistent(persistent) - self.delivery_mode = 2 if self.persistent else 1 - exchange = exchange or conf.CELERY_RESULT_EXCHANGE - exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE - self.exchange = self._create_exchange( - exchange, exchange_type, self.delivery_mode, - ) - self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER - self.auto_delete = auto_delete - - self.expires = None - if 'expires' not in kwargs or kwargs['expires'] is not None: - self.expires = self.prepare_expires(kwargs.get('expires')) - self.queue_arguments = dictfilter({ - 'x-expires': maybe_s_to_ms(self.expires), - }) - - def _create_exchange(self, name, type='direct', delivery_mode=2): - return self.Exchange(name=name, - type=type, - delivery_mode=delivery_mode, - durable=self.persistent, - auto_delete=False) - - def _create_binding(self, task_id): - name = self.rkey(task_id) - return self.Queue(name=name, - exchange=self.exchange, - routing_key=name, - durable=self.persistent, - auto_delete=self.auto_delete, - queue_arguments=self.queue_arguments) - - def revive(self, channel): - pass - - def rkey(self, task_id): - return task_id.replace('-', '') - - def destination_for(self, task_id, request): - if request: - return self.rkey(task_id), request.correlation_id or task_id - return self.rkey(task_id), task_id - - def store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Send task return value and status.""" - routing_key, correlation_id = self.destination_for(task_id, request) - if not routing_key: - return - with self.app.amqp.producer_pool.acquire(block=True) as producer: - producer.publish( - {'task_id': task_id, 'status': status, - 'result': self.encode_result(result, status), - 'traceback': traceback, - 'children': self.current_task_children(request)}, - exchange=self.exchange, - routing_key=routing_key, - 
correlation_id=correlation_id, - serializer=self.serializer, - retry=True, retry_policy=self.retry_policy, - declare=self.on_reply_declare(task_id), - delivery_mode=self.delivery_mode, - ) - return result - - def on_reply_declare(self, task_id): - return [self._create_binding(task_id)] - - def wait_for(self, task_id, timeout=None, cache=True, - no_ack=True, on_interval=None, - READY_STATES=states.READY_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES, - **kwargs): - cached_meta = self._cache.get(task_id) - if cache and cached_meta and \ - cached_meta['status'] in READY_STATES: - return cached_meta - else: - try: - return self.consume(task_id, timeout=timeout, no_ack=no_ack, - on_interval=on_interval) - except socket.timeout: - raise TimeoutError('The operation timed out.') - - def get_task_meta(self, task_id, backlog_limit=1000): - # Polling and using basic_get - with self.app.pool.acquire_channel(block=True) as (_, channel): - binding = self._create_binding(task_id)(channel) - binding.declare() - - prev = latest = acc = None - for i in range(backlog_limit): # spool ffwd - acc = binding.get( - accept=self.accept, no_ack=False, - ) - if not acc: # no more messages - break - if acc.payload['task_id'] == task_id: - prev, latest = latest, acc - if prev: - # backends are not expected to keep history, - # so we delete everything except the most recent state. - prev.ack() - prev = None - else: - raise self.BacklogLimitExceeded(task_id) - - if latest: - payload = self._cache[task_id] = \ - self.meta_from_decoded(latest.payload) - latest.requeue() - return payload - else: - # no new state, use previous - try: - return self._cache[task_id] - except KeyError: - # result probably pending. - return {'status': states.PENDING, 'result': None} - poll = get_task_meta # XXX compat - - def drain_events(self, connection, consumer, - timeout=None, on_interval=None, now=monotonic, wait=None): - wait = wait or connection.drain_events - results = {} - - def callback(meta, message): - if meta['status'] in states.READY_STATES: - results[meta['task_id']] = self.meta_from_decoded(meta) - - consumer.callbacks[:] = [callback] - time_start = now() - - while 1: - # Total time spent may exceed a single call to wait() - if timeout and now() - time_start >= timeout: - raise socket.timeout() - try: - wait(timeout=1) - except socket.timeout: - pass - if on_interval: - on_interval() - if results: # got event on the wanted channel. 
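-                # (editor's note: ``results`` is only populated by the
-                # ``callback`` above when a READY state arrives, so it is
-                # safe to stop draining at this point.)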
- break - self._cache.update(results) - return results - - def consume(self, task_id, timeout=None, no_ack=True, on_interval=None): - wait = self.drain_events - with self.app.pool.acquire_channel(block=True) as (conn, channel): - binding = self._create_binding(task_id) - with self.Consumer(channel, binding, - no_ack=no_ack, accept=self.accept) as consumer: - while 1: - try: - return wait( - conn, consumer, timeout, on_interval)[task_id] - except KeyError: - continue - - def _many_bindings(self, ids): - return [self._create_binding(task_id) for task_id in ids] - - def get_many(self, task_ids, timeout=None, no_ack=True, - now=monotonic, getfields=itemgetter('status', 'task_id'), - READY_STATES=states.READY_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): - with self.app.pool.acquire_channel(block=True) as (conn, channel): - ids = set(task_ids) - cached_ids = set() - mark_cached = cached_ids.add - for task_id in ids: - try: - cached = self._cache[task_id] - except KeyError: - pass - else: - if cached['status'] in READY_STATES: - yield task_id, cached - mark_cached(task_id) - ids.difference_update(cached_ids) - results = deque() - push_result = results.append - push_cache = self._cache.__setitem__ - decode_result = self.meta_from_decoded - - def on_message(message): - body = decode_result(message.decode()) - state, uid = getfields(body) - if state in READY_STATES: - push_result(body) \ - if uid in task_ids else push_cache(uid, body) - - bindings = self._many_bindings(task_ids) - with self.Consumer(channel, bindings, on_message=on_message, - accept=self.accept, no_ack=no_ack): - wait = conn.drain_events - popleft = results.popleft - while ids: - wait(timeout=timeout) - while results: - state = popleft() - task_id = state['task_id'] - ids.discard(task_id) - push_cache(task_id, state) - yield task_id, state - - def reload_task_result(self, task_id): - raise NotImplementedError( - 'reload_task_result is not supported by this backend.') - - def reload_group_result(self, task_id): - """Reload group result, even if it has been previously fetched.""" - raise NotImplementedError( - 'reload_group_result is not supported by this backend.') - - def save_group(self, group_id, result): - raise NotImplementedError( - 'save_group is not supported by this backend.') - - def restore_group(self, group_id, cache=True): - raise NotImplementedError( - 'restore_group is not supported by this backend.') - - def delete_group(self, group_id): - raise NotImplementedError( - 'delete_group is not supported by this backend.') - - def as_uri(self, include_password=True): - return 'amqp://' - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - connection=self._connection, - exchange=self.exchange.name, - exchange_type=self.exchange.type, - persistent=self.persistent, - serializer=self.serializer, - auto_delete=self.auto_delete, - expires=self.expires, - ) - return super(AMQPBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/base.py b/thesisenv/lib/python3.6/site-packages/celery/backends/base.py deleted file mode 100644 index 03b6909..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/base.py +++ /dev/null @@ -1,623 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.base - ~~~~~~~~~~~~~~~~~~~~ - - Result backend base classes. - - - :class:`BaseBackend` defines the interface. - - - :class:`KeyValueStoreBackend` is a common base class - using K/V semantics like _get and _put. 
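-
-    Editor's sketch (illustrative only, not from the original file): a
-    minimal backend needs little more than the K/V primitives::
-
-        from celery.backends.base import KeyValueStoreBackend
-
-        class DictBackend(KeyValueStoreBackend):
-            _store = {}  # toy in-process storage, for demonstration only
-
-            def get(self, key):
-                return self._store.get(key)
-
-            def mget(self, keys):
-                return [self.get(key) for key in keys]
-
-            def set(self, key, value):
-                self._store[key] = value
-
-            def delete(self, key):
-                self._store.pop(key, None)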
- -""" -from __future__ import absolute_import - -import time -import sys - -from datetime import timedelta - -from billiard.einfo import ExceptionInfo -from kombu.serialization import ( - dumps, loads, prepare_accept_content, - registry as serializer_registry, -) -from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8 -from kombu.utils.url import maybe_sanitize_url - -from celery import states -from celery import current_app, maybe_signature -from celery.app import current_task -from celery.exceptions import ChordError, TimeoutError, TaskRevokedError -from celery.five import items -from celery.result import ( - GroupResult, ResultBase, allow_join_result, result_from_tuple, -) -from celery.utils import timeutils -from celery.utils.functional import LRUCache -from celery.utils.log import get_logger -from celery.utils.serialization import ( - get_pickled_exception, - get_pickleable_exception, - create_exception_cls, -) - -__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend'] - -EXCEPTION_ABLE_CODECS = frozenset(['pickle']) -PY3 = sys.version_info >= (3, 0) - -logger = get_logger(__name__) - - -def unpickle_backend(cls, args, kwargs): - """Return an unpickled backend.""" - return cls(*args, app=current_app._get_current_object(), **kwargs) - - -class _nulldict(dict): - - def ignore(self, *a, **kw): - pass - __setitem__ = update = setdefault = ignore - - -class BaseBackend(object): - READY_STATES = states.READY_STATES - UNREADY_STATES = states.UNREADY_STATES - EXCEPTION_STATES = states.EXCEPTION_STATES - - TimeoutError = TimeoutError - - #: Time to sleep between polling each individual item - #: in `ResultSet.iterate`. as opposed to the `interval` - #: argument which is for each pass. - subpolling_interval = None - - #: If true the backend must implement :meth:`get_many`. - supports_native_join = False - - #: If true the backend must automatically expire results. - #: The daily backend_cleanup periodic task will not be triggered - #: in this case. - supports_autoexpire = False - - #: Set to true if the backend is peristent by default. 
- persistent = True - - retry_policy = { - 'max_retries': 20, - 'interval_start': 0, - 'interval_step': 1, - 'interval_max': 1, - } - - def __init__(self, app, - serializer=None, max_cached_results=None, accept=None, - url=None, **kwargs): - self.app = app - conf = self.app.conf - self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER - (self.content_type, - self.content_encoding, - self.encoder) = serializer_registry._encoders[self.serializer] - cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS - self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) - self.accept = prepare_accept_content( - conf.CELERY_ACCEPT_CONTENT if accept is None else accept, - ) - self.url = url - - def as_uri(self, include_password=False): - """Return the backend as an URI, sanitizing the password or not""" - # when using maybe_sanitize_url(), "/" is added - # we're stripping it for consistency - if include_password: - return self.url - url = maybe_sanitize_url(self.url or '') - return url[:-1] if url.endswith(':///') else url - - def mark_as_started(self, task_id, **meta): - """Mark a task as started""" - return self.store_result(task_id, meta, status=states.STARTED) - - def mark_as_done(self, task_id, result, request=None): - """Mark task as successfully executed.""" - return self.store_result(task_id, result, - status=states.SUCCESS, request=request) - - def mark_as_failure(self, task_id, exc, traceback=None, request=None): - """Mark task as executed with failure. Stores the exception.""" - return self.store_result(task_id, exc, status=states.FAILURE, - traceback=traceback, request=request) - - def chord_error_from_stack(self, callback, exc=None): - from celery import group - app = self.app - backend = app._tasks[callback.task].backend - try: - group( - [app.signature(errback) - for errback in callback.options.get('link_error') or []], - app=app, - ).apply_async((callback.id, )) - except Exception as eb_exc: - return backend.fail_from_current_stack(callback.id, exc=eb_exc) - else: - return backend.fail_from_current_stack(callback.id, exc=exc) - - def fail_from_current_stack(self, task_id, exc=None): - type_, real_exc, tb = sys.exc_info() - try: - exc = real_exc if exc is None else exc - ei = ExceptionInfo((type_, exc, tb)) - self.mark_as_failure(task_id, exc, ei.traceback) - return ei - finally: - del(tb) - - def mark_as_retry(self, task_id, exc, traceback=None, request=None): - """Mark task as being retries. 
Stores the current - exception (if any).""" - return self.store_result(task_id, exc, status=states.RETRY, - traceback=traceback, request=request) - - def mark_as_revoked(self, task_id, reason='', request=None): - return self.store_result(task_id, TaskRevokedError(reason), - status=states.REVOKED, traceback=None, - request=request) - - def prepare_exception(self, exc, serializer=None): - """Prepare exception for serialization.""" - serializer = self.serializer if serializer is None else serializer - if serializer in EXCEPTION_ABLE_CODECS: - return get_pickleable_exception(exc) - return {'exc_type': type(exc).__name__, 'exc_message': str(exc)} - - def exception_to_python(self, exc): - """Convert serialized exception to Python exception.""" - if exc: - if not isinstance(exc, BaseException): - exc = create_exception_cls( - from_utf8(exc['exc_type']), __name__)(exc['exc_message']) - if self.serializer in EXCEPTION_ABLE_CODECS: - exc = get_pickled_exception(exc) - return exc - - def prepare_value(self, result): - """Prepare value for storage.""" - if self.serializer != 'pickle' and isinstance(result, ResultBase): - return result.as_tuple() - return result - - def encode(self, data): - _, _, payload = dumps(data, serializer=self.serializer) - return payload - - def meta_from_decoded(self, meta): - if meta['status'] in self.EXCEPTION_STATES: - meta['result'] = self.exception_to_python(meta['result']) - return meta - - def decode_result(self, payload): - return self.meta_from_decoded(self.decode(payload)) - - def decode(self, payload): - payload = PY3 and payload or str(payload) - return loads(payload, - content_type=self.content_type, - content_encoding=self.content_encoding, - accept=self.accept) - - def wait_for(self, task_id, - timeout=None, interval=0.5, no_ack=True, on_interval=None): - """Wait for task and return its result. - - If the task raises an exception, this exception - will be re-raised by :func:`wait_for`. - - If `timeout` is not :const:`None`, this raises the - :class:`celery.exceptions.TimeoutError` exception if the operation - takes longer than `timeout` seconds. - - """ - - time_elapsed = 0.0 - - while 1: - meta = self.get_task_meta(task_id) - if meta['status'] in states.READY_STATES: - return meta - if on_interval: - on_interval() - # avoid hammering the CPU checking status. 
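-            # (editor's note: the timeout check below is only accurate to
-            # within one ``interval``, since elapsed time is summed per
-            # pass rather than measured against a clock.)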
- time.sleep(interval) - time_elapsed += interval - if timeout and time_elapsed >= timeout: - raise TimeoutError('The operation timed out.') - - def prepare_expires(self, value, type=None): - if value is None: - value = self.app.conf.CELERY_TASK_RESULT_EXPIRES - if isinstance(value, timedelta): - value = timeutils.timedelta_seconds(value) - if value is not None and type: - return type(value) - return value - - def prepare_persistent(self, enabled=None): - if enabled is not None: - return enabled - p = self.app.conf.CELERY_RESULT_PERSISTENT - return self.persistent if p is None else p - - def encode_result(self, result, status): - if isinstance(result, ExceptionInfo): - result = result.exception - if status in self.EXCEPTION_STATES and isinstance(result, Exception): - return self.prepare_exception(result) - else: - return self.prepare_value(result) - - def is_cached(self, task_id): - return task_id in self._cache - - def store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Update task state and result.""" - result = self.encode_result(result, status) - self._store_result(task_id, result, status, traceback, - request=request, **kwargs) - return result - - def forget(self, task_id): - self._cache.pop(task_id, None) - self._forget(task_id) - - def _forget(self, task_id): - raise NotImplementedError('backend does not implement forget.') - - def get_status(self, task_id): - """Get the status of a task.""" - return self.get_task_meta(task_id)['status'] - - def get_traceback(self, task_id): - """Get the traceback for a failed task.""" - return self.get_task_meta(task_id).get('traceback') - - def get_result(self, task_id): - """Get the result of a task.""" - return self.get_task_meta(task_id).get('result') - - def get_children(self, task_id): - """Get the list of subtasks sent by a task.""" - try: - return self.get_task_meta(task_id)['children'] - except KeyError: - pass - - def get_task_meta(self, task_id, cache=True): - if cache: - try: - return self._cache[task_id] - except KeyError: - pass - - meta = self._get_task_meta_for(task_id) - if cache and meta.get('status') == states.SUCCESS: - self._cache[task_id] = meta - return meta - - def reload_task_result(self, task_id): - """Reload task result, even if it has been previously fetched.""" - self._cache[task_id] = self.get_task_meta(task_id, cache=False) - - def reload_group_result(self, group_id): - """Reload group result, even if it has been previously fetched.""" - self._cache[group_id] = self.get_group_meta(group_id, cache=False) - - def get_group_meta(self, group_id, cache=True): - if cache: - try: - return self._cache[group_id] - except KeyError: - pass - - meta = self._restore_group(group_id) - if cache and meta is not None: - self._cache[group_id] = meta - return meta - - def restore_group(self, group_id, cache=True): - """Get the result for a group.""" - meta = self.get_group_meta(group_id, cache=cache) - if meta: - return meta['result'] - - def save_group(self, group_id, result): - """Store the result of an executed group.""" - return self._save_group(group_id, result) - - def delete_group(self, group_id): - self._cache.pop(group_id, None) - return self._delete_group(group_id) - - def cleanup(self): - """Backend cleanup. 
Is run by - :class:`celery.task.DeleteExpiredTaskMetaTask`.""" - pass - - def process_cleanup(self): - """Cleanup actions to do at the end of a task worker process.""" - pass - - def on_task_call(self, producer, task_id): - return {} - - def on_chord_part_return(self, task, state, result, propagate=False): - pass - - def fallback_chord_unlock(self, group_id, body, result=None, - countdown=1, **kwargs): - kwargs['result'] = [r.as_tuple() for r in result] - self.app.tasks['celery.chord_unlock'].apply_async( - (group_id, body, ), kwargs, countdown=countdown, - ) - - def apply_chord(self, header, partial_args, group_id, body, **options): - result = header(*partial_args, task_id=group_id) - self.fallback_chord_unlock(group_id, body, **options) - return result - - def current_task_children(self, request=None): - request = request or getattr(current_task(), 'request', None) - if request: - return [r.as_tuple() for r in getattr(request, 'children', [])] - - def __reduce__(self, args=(), kwargs={}): - return (unpickle_backend, (self.__class__, args, kwargs)) -BaseDictBackend = BaseBackend # XXX compat - - -class KeyValueStoreBackend(BaseBackend): - key_t = ensure_bytes - task_keyprefix = 'celery-task-meta-' - group_keyprefix = 'celery-taskset-meta-' - chord_keyprefix = 'chord-unlock-' - implements_incr = False - - def __init__(self, *args, **kwargs): - if hasattr(self.key_t, '__func__'): - self.key_t = self.key_t.__func__ # remove binding - self._encode_prefixes() - super(KeyValueStoreBackend, self).__init__(*args, **kwargs) - if self.implements_incr: - self.apply_chord = self._apply_chord_incr - - def _encode_prefixes(self): - self.task_keyprefix = self.key_t(self.task_keyprefix) - self.group_keyprefix = self.key_t(self.group_keyprefix) - self.chord_keyprefix = self.key_t(self.chord_keyprefix) - - def get(self, key): - raise NotImplementedError('Must implement the get method.') - - def mget(self, keys): - raise NotImplementedError('Does not support get_many') - - def set(self, key, value): - raise NotImplementedError('Must implement the set method.') - - def delete(self, key): - raise NotImplementedError('Must implement the delete method') - - def incr(self, key): - raise NotImplementedError('Does not implement incr') - - def expire(self, key, value): - pass - - def get_key_for_task(self, task_id, key=''): - """Get the cache key for a task by id.""" - key_t = self.key_t - return key_t('').join([ - self.task_keyprefix, key_t(task_id), key_t(key), - ]) - - def get_key_for_group(self, group_id, key=''): - """Get the cache key for a group by id.""" - key_t = self.key_t - return key_t('').join([ - self.group_keyprefix, key_t(group_id), key_t(key), - ]) - - def get_key_for_chord(self, group_id, key=''): - """Get the cache key for the chord waiting on group with given id.""" - key_t = self.key_t - return key_t('').join([ - self.chord_keyprefix, key_t(group_id), key_t(key), - ]) - - def _strip_prefix(self, key): - """Takes bytes, emits string.""" - key = self.key_t(key) - for prefix in self.task_keyprefix, self.group_keyprefix: - if key.startswith(prefix): - return bytes_to_str(key[len(prefix):]) - return bytes_to_str(key) - - def _filter_ready(self, values, READY_STATES=states.READY_STATES): - for k, v in values: - if v is not None: - v = self.decode_result(v) - if v['status'] in READY_STATES: - yield k, v - - def _mget_to_results(self, values, keys): - if hasattr(values, 'items'): - # client returns dict so mapping preserved. 
- return dict((self._strip_prefix(k), v) - for k, v in self._filter_ready(items(values))) - else: - # client returns list so need to recreate mapping. - return dict((bytes_to_str(keys[i]), v) - for i, v in self._filter_ready(enumerate(values))) - - def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True, - READY_STATES=states.READY_STATES): - interval = 0.5 if interval is None else interval - ids = task_ids if isinstance(task_ids, set) else set(task_ids) - cached_ids = set() - cache = self._cache - for task_id in ids: - try: - cached = cache[task_id] - except KeyError: - pass - else: - if cached['status'] in READY_STATES: - yield bytes_to_str(task_id), cached - cached_ids.add(task_id) - - ids.difference_update(cached_ids) - iterations = 0 - while ids: - keys = list(ids) - r = self._mget_to_results(self.mget([self.get_key_for_task(k) - for k in keys]), keys) - cache.update(r) - ids.difference_update(set(bytes_to_str(v) for v in r)) - for key, value in items(r): - yield bytes_to_str(key), value - if timeout and iterations * interval >= timeout: - raise TimeoutError('Operation timed out ({0})'.format(timeout)) - time.sleep(interval) # don't busy loop. - iterations += 1 - - def _forget(self, task_id): - self.delete(self.get_key_for_task(task_id)) - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - meta = {'status': status, 'result': result, 'traceback': traceback, - 'children': self.current_task_children(request)} - self.set(self.get_key_for_task(task_id), self.encode(meta)) - return result - - def _save_group(self, group_id, result): - self.set(self.get_key_for_group(group_id), - self.encode({'result': result.as_tuple()})) - return result - - def _delete_group(self, group_id): - self.delete(self.get_key_for_group(group_id)) - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - meta = self.get(self.get_key_for_task(task_id)) - if not meta: - return {'status': states.PENDING, 'result': None} - return self.decode_result(meta) - - def _restore_group(self, group_id): - """Get task metadata for a task by id.""" - meta = self.get(self.get_key_for_group(group_id)) - # previously this was always pickled, but later this - # was extended to support other serializers, so the - # structure is kind of weird. 
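-        # (editor's note: ``_save_group()`` stores ``result.as_tuple()``,
-        # and ``result_from_tuple()`` below rebuilds a live result object
-        # from that tuple form.)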
- if meta: - meta = self.decode(meta) - result = meta['result'] - meta['result'] = result_from_tuple(result, self.app) - return meta - - def _apply_chord_incr(self, header, partial_args, group_id, body, - result=None, **options): - self.save_group(group_id, self.app.GroupResult(group_id, result)) - return header(*partial_args, task_id=group_id) - - def on_chord_part_return(self, task, state, result, propagate=None): - if not self.implements_incr: - return - app = self.app - if propagate is None: - propagate = app.conf.CELERY_CHORD_PROPAGATES - gid = task.request.group - if not gid: - return - key = self.get_key_for_chord(gid) - try: - deps = GroupResult.restore(gid, backend=task.backend) - except Exception as exc: - callback = maybe_signature(task.request.chord, app=app) - logger.error('Chord %r raised: %r', gid, exc, exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('Cannot restore group: {0!r}'.format(exc)), - ) - if deps is None: - try: - raise ValueError(gid) - except ValueError as exc: - callback = maybe_signature(task.request.chord, app=app) - logger.error('Chord callback %r raised: %r', gid, exc, - exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('GroupResult {0} no longer exists'.format(gid)), - ) - val = self.incr(key) - size = len(deps) - if val > size: - logger.warning('Chord counter incremented too many times for %r', - gid) - elif val == size: - callback = maybe_signature(task.request.chord, app=app) - j = deps.join_native if deps.supports_native_join else deps.join - try: - with allow_join_result(): - ret = j(timeout=3.0, propagate=propagate) - except Exception as exc: - try: - culprit = next(deps._failed_join_report()) - reason = 'Dependency {0.id} raised {1!r}'.format( - culprit, exc, - ) - except StopIteration: - reason = repr(exc) - - logger.error('Chord %r raised: %r', gid, reason, exc_info=1) - self.chord_error_from_stack(callback, ChordError(reason)) - else: - try: - callback.delay(ret) - except Exception as exc: - logger.error('Chord %r raised: %r', gid, exc, exc_info=1) - self.chord_error_from_stack( - callback, - ChordError('Callback error: {0!r}'.format(exc)), - ) - finally: - deps.delete() - self.client.delete(key) - else: - self.expire(key, 86400) - - -class DisabledBackend(BaseBackend): - _cache = {} # need this attribute to reset cache in tests. - - def store_result(self, *args, **kwargs): - pass - - def _is_disabled(self, *args, **kwargs): - raise NotImplementedError( - 'No result backend configured. ' - 'Please see the documentation for more information.') - - def as_uri(self, *args, **kwargs): - return 'disabled://' - - get_state = get_status = get_result = get_traceback = _is_disabled - wait_for = get_many = _is_disabled diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py deleted file mode 100644 index 3c8230c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.cache - ~~~~~~~~~~~~~~~~~~~~~ - - Memcache and in-memory cache result backend. 
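-
-    Editor's illustration (not in the original source): a typical Celery
-    3.1-era configuration::
-
-        CELERY_RESULT_BACKEND = 'cache'
-        CELERY_CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
-
-    The special ``memory`` backend keeps results in a process-local
-    LRUCache, which is mainly useful in tests.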
- -""" -from __future__ import absolute_import - -import sys - -from kombu.utils import cached_property -from kombu.utils.encoding import bytes_to_str, ensure_bytes - -from celery.exceptions import ImproperlyConfigured -from celery.utils.functional import LRUCache - -from .base import KeyValueStoreBackend - -__all__ = ['CacheBackend'] - -_imp = [None] - -PY3 = sys.version_info[0] == 3 - -REQUIRES_BACKEND = """\ -The memcached backend requires either pylibmc or python-memcached.\ -""" - -UNKNOWN_BACKEND = """\ -The cache backend {0!r} is unknown, -Please use one of the following backends instead: {1}\ -""" - - -def import_best_memcache(): - if _imp[0] is None: - is_pylibmc, memcache_key_t = False, ensure_bytes - try: - import pylibmc as memcache - is_pylibmc = True - except ImportError: - try: - import memcache # noqa - except ImportError: - raise ImproperlyConfigured(REQUIRES_BACKEND) - if PY3: - memcache_key_t = bytes_to_str - _imp[0] = (is_pylibmc, memcache, memcache_key_t) - return _imp[0] - - -def get_best_memcache(*args, **kwargs): - is_pylibmc, memcache, key_t = import_best_memcache() - Client = _Client = memcache.Client - - if not is_pylibmc: - def Client(*args, **kwargs): # noqa - kwargs.pop('behaviors', None) - return _Client(*args, **kwargs) - - return Client, key_t - - -class DummyClient(object): - - def __init__(self, *args, **kwargs): - self.cache = LRUCache(limit=5000) - - def get(self, key, *args, **kwargs): - return self.cache.get(key) - - def get_multi(self, keys): - cache = self.cache - return dict((k, cache[k]) for k in keys if k in cache) - - def set(self, key, value, *args, **kwargs): - self.cache[key] = value - - def delete(self, key, *args, **kwargs): - self.cache.pop(key, None) - - def incr(self, key, delta=1): - return self.cache.incr(key, delta) - - -backends = {'memcache': get_best_memcache, - 'memcached': get_best_memcache, - 'pylibmc': get_best_memcache, - 'memory': lambda: (DummyClient, ensure_bytes)} - - -class CacheBackend(KeyValueStoreBackend): - servers = None - supports_autoexpire = True - supports_native_join = True - implements_incr = True - - def __init__(self, app, expires=None, backend=None, - options={}, url=None, **kwargs): - super(CacheBackend, self).__init__(app, **kwargs) - self.url = url - - self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS, - **options) - - self.backend = url or backend or self.app.conf.CELERY_CACHE_BACKEND - if self.backend: - self.backend, _, servers = self.backend.partition('://') - self.servers = servers.rstrip('/').split(';') - self.expires = self.prepare_expires(expires, type=int) - try: - self.Client, self.key_t = backends[self.backend]() - except KeyError: - raise ImproperlyConfigured(UNKNOWN_BACKEND.format( - self.backend, ', '.join(backends))) - self._encode_prefixes() # rencode the keyprefixes - - def get(self, key): - return self.client.get(key) - - def mget(self, keys): - return self.client.get_multi(keys) - - def set(self, key, value): - return self.client.set(key, value, self.expires) - - def delete(self, key): - return self.client.delete(key) - - def _apply_chord_incr(self, header, partial_args, group_id, body, **opts): - self.client.set(self.get_key_for_chord(group_id), 0, time=86400) - return super(CacheBackend, self)._apply_chord_incr( - header, partial_args, group_id, body, **opts - ) - - def incr(self, key): - return self.client.incr(key) - - @cached_property - def client(self): - return self.Client(self.servers, **self.options) - - def __reduce__(self, args=(), kwargs={}): - servers = 
';'.join(self.servers) - backend = '{0}://{1}/'.format(self.backend, servers) - kwargs.update( - dict(backend=backend, - expires=self.expires, - options=self.options)) - return super(CacheBackend, self).__reduce__(args, kwargs) - - def as_uri(self, *args, **kwargs): - """Return the backend as an URI. - - This properly handles the case of multiple servers. - - """ - servers = ';'.join(self.servers) - return '{0}://{1}/'.format(self.backend, servers) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py deleted file mode 100644 index 79f17ee..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py +++ /dev/null @@ -1,196 +0,0 @@ -# -* coding: utf-8 -*- -""" - celery.backends.cassandra - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Apache Cassandra result store backend. - -""" -from __future__ import absolute_import - -try: # pragma: no cover - import pycassa - from thrift import Thrift - C = pycassa.cassandra.ttypes -except ImportError: # pragma: no cover - pycassa = None # noqa - -import socket -import time - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.five import monotonic -from celery.utils.log import get_logger -from celery.utils.timeutils import maybe_timedelta, timedelta_seconds - -from .base import BaseBackend - -__all__ = ['CassandraBackend'] - -logger = get_logger(__name__) - - -class CassandraBackend(BaseBackend): - """Highly fault tolerant Cassandra backend. - - .. attribute:: servers - - List of Cassandra servers with format: ``hostname:port``. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`pycassa` is not available. - - """ - servers = [] - keyspace = None - column_family = None - detailed_mode = False - _retry_timeout = 300 - _retry_wait = 3 - supports_autoexpire = True - - def __init__(self, servers=None, keyspace=None, column_family=None, - cassandra_options=None, detailed_mode=False, **kwargs): - """Initialize Cassandra backend. - - Raises :class:`celery.exceptions.ImproperlyConfigured` if - the :setting:`CASSANDRA_SERVERS` setting is not set. - - """ - super(CassandraBackend, self).__init__(**kwargs) - - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if not pycassa: - raise ImproperlyConfigured( - 'You need to install the pycassa library to use the ' - 'Cassandra backend. 
See https://github.com/pycassa/pycassa') - - conf = self.app.conf - self.servers = (servers or - conf.get('CASSANDRA_SERVERS') or - self.servers) - self.keyspace = (keyspace or - conf.get('CASSANDRA_KEYSPACE') or - self.keyspace) - self.column_family = (column_family or - conf.get('CASSANDRA_COLUMN_FAMILY') or - self.column_family) - self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {}, - **cassandra_options or {}) - self.detailed_mode = (detailed_mode or - conf.get('CASSANDRA_DETAILED_MODE') or - self.detailed_mode) - read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM' - write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM' - try: - self.read_consistency = getattr(pycassa.ConsistencyLevel, - read_cons) - except AttributeError: - self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM - try: - self.write_consistency = getattr(pycassa.ConsistencyLevel, - write_cons) - except AttributeError: - self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM - - if not self.servers or not self.keyspace or not self.column_family: - raise ImproperlyConfigured( - 'Cassandra backend not configured.') - - self._column_family = None - - def _retry_on_error(self, fun, *args, **kwargs): - ts = monotonic() + self._retry_timeout - while 1: - try: - return fun(*args, **kwargs) - except (pycassa.InvalidRequestException, - pycassa.TimedOutException, - pycassa.UnavailableException, - pycassa.AllServersUnavailable, - socket.error, - socket.timeout, - Thrift.TException) as exc: - if monotonic() > ts: - raise - logger.warning('Cassandra error: %r. Retrying...', exc) - time.sleep(self._retry_wait) - - def _get_column_family(self): - if self._column_family is None: - conn = pycassa.ConnectionPool(self.keyspace, - server_list=self.servers, - **self.cassandra_options) - self._column_family = pycassa.ColumnFamily( - conn, self.column_family, - read_consistency_level=self.read_consistency, - write_consistency_level=self.write_consistency, - ) - return self._column_family - - def process_cleanup(self): - if self._column_family is not None: - self._column_family = None - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Store return value and status of an executed task.""" - - def _do_store(): - cf = self._get_column_family() - date_done = self.app.now() - meta = {'status': status, - 'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), - 'traceback': self.encode(traceback), - 'result': self.encode(result), - 'children': self.encode( - self.current_task_children(request), - )} - if self.detailed_mode: - cf.insert(task_id, {date_done: self.encode(meta)}, - ttl=self.expires and timedelta_seconds(self.expires)) - else: - cf.insert(task_id, meta, - ttl=self.expires and timedelta_seconds(self.expires)) - - return self._retry_on_error(_do_store) - - def as_uri(self, include_password=True): - return 'cassandra://' - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - - def _do_get(): - cf = self._get_column_family() - try: - if self.detailed_mode: - row = cf.get(task_id, column_reversed=True, column_count=1) - obj = self.decode(list(row.values())[0]) - else: - obj = cf.get(task_id) - - meta = { - 'task_id': task_id, - 'status': obj['status'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - 'traceback': self.decode(obj['traceback']), - 'children': self.decode(obj['children']), - } - except (KeyError, pycassa.NotFoundException): - meta = {'status': states.PENDING, 
'result': None} - return meta - - return self._retry_on_error(_do_get) - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - dict(servers=self.servers, - keyspace=self.keyspace, - column_family=self.column_family, - cassandra_options=self.cassandra_options)) - return super(CassandraBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py deleted file mode 100644 index cd7555e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.couchbase - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - CouchBase result store backend. - -""" -from __future__ import absolute_import - -import logging - -try: - from couchbase import Couchbase - from couchbase.connection import Connection - from couchbase.exceptions import NotFoundError -except ImportError: - Couchbase = Connection = NotFoundError = None # noqa - -from kombu.utils.url import _parse_url - -from celery.exceptions import ImproperlyConfigured -from celery.utils.timeutils import maybe_timedelta - -from .base import KeyValueStoreBackend - -__all__ = ['CouchBaseBackend'] - - -class CouchBaseBackend(KeyValueStoreBackend): - """CouchBase backend. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`couchbase` is not available. - - """ - bucket = 'default' - host = 'localhost' - port = 8091 - username = None - password = None - quiet = False - conncache = None - unlock_gil = True - timeout = 2.5 - transcoder = None - - def __init__(self, url=None, *args, **kwargs): - super(CouchBaseBackend, self).__init__(*args, **kwargs) - self.url = url - - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if Couchbase is None: - raise ImproperlyConfigured( - 'You need to install the couchbase library to use the ' - 'CouchBase backend.', - ) - - uhost = uport = uname = upass = ubucket = None - if url: - _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) - ubucket = ubucket.strip('/') if ubucket else None - - config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None) - if config is not None: - if not isinstance(config, dict): - raise ImproperlyConfigured( - 'Couchbase backend settings should be grouped in a dict', - ) - else: - config = {} - - self.host = uhost or config.get('host', self.host) - self.port = int(uport or config.get('port', self.port)) - self.bucket = ubucket or config.get('bucket', self.bucket) - self.username = uname or config.get('username', self.username) - self.password = upass or config.get('password', self.password) - - self._connection = None - - def _get_connection(self): - """Connect to the Couchbase server.""" - if self._connection is None: - kwargs = {'bucket': self.bucket, 'host': self.host} - - if self.port: - kwargs.update({'port': self.port}) - if self.username: - kwargs.update({'username': self.username}) - if self.password: - kwargs.update({'password': self.password}) - - logging.debug('couchbase settings %r', kwargs) - self._connection = Connection(**kwargs) - return self._connection - - @property - def connection(self): - return self._get_connection() - - def get(self, key): - try: - return self.connection.get(key).value - except NotFoundError: - return None - - def set(self, key, value): - self.connection.set(key, value) - - def mget(self, keys): - return [self.get(key) for key in keys] - - def delete(self, key): 
- self.connection.delete(key) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py deleted file mode 100644 index f47fdd5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database - ~~~~~~~~~~~~~~~~~~~~~~~~ - - SQLAlchemy result store backend. - -""" -from __future__ import absolute_import - -import logging -from contextlib import contextmanager -from functools import wraps - -from celery import states -from celery.backends.base import BaseBackend -from celery.exceptions import ImproperlyConfigured -from celery.five import range -from celery.utils.timeutils import maybe_timedelta - -from .models import Task -from .models import TaskSet -from .session import SessionManager - -logger = logging.getLogger(__name__) - -__all__ = ['DatabaseBackend'] - - -def _sqlalchemy_installed(): - try: - import sqlalchemy - except ImportError: - raise ImproperlyConfigured( - 'The database result backend requires SQLAlchemy to be installed.' - 'See http://pypi.python.org/pypi/SQLAlchemy') - return sqlalchemy -_sqlalchemy_installed() - -from sqlalchemy.exc import DatabaseError, InvalidRequestError # noqa -from sqlalchemy.orm.exc import StaleDataError # noqa - - -@contextmanager -def session_cleanup(session): - try: - yield - except Exception: - session.rollback() - raise - finally: - session.close() - - -def retry(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - max_retries = kwargs.pop('max_retries', 3) - - for retries in range(max_retries): - try: - return fun(*args, **kwargs) - except (DatabaseError, InvalidRequestError, StaleDataError): - logger.warning( - "Failed operation %s. Retrying %s more times.", - fun.__name__, max_retries - retries - 1, - exc_info=True, - ) - if retries + 1 >= max_retries: - raise - - return _inner - - -class DatabaseBackend(BaseBackend): - """The database result backend.""" - # ResultSet.iterate should sleep this much between each pool, - # to not bombard the database with queries. - subpolling_interval = 0.5 - - def __init__(self, dburi=None, expires=None, - engine_options=None, url=None, **kwargs): - # The `url` argument was added later and is used by - # the app to set backend by url (celery.backends.get_backend_by_url) - super(DatabaseBackend, self).__init__(**kwargs) - conf = self.app.conf - self.expires = maybe_timedelta(self.prepare_expires(expires)) - self.url = url or dburi or conf.CELERY_RESULT_DBURI - self.engine_options = dict( - engine_options or {}, - **conf.CELERY_RESULT_ENGINE_OPTIONS or {}) - self.short_lived_sessions = kwargs.get( - 'short_lived_sessions', - conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS, - ) - - tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {} - Task.__table__.name = tablenames.get('task', 'celery_taskmeta') - TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta') - - if not self.url: - raise ImproperlyConfigured( - 'Missing connection string! 
Do you have ' - 'CELERY_RESULT_DBURI set to a real value?') - - def ResultSession(self, session_manager=SessionManager()): - return session_manager.session_factory( - dburi=self.url, - short_lived_sessions=self.short_lived_sessions, - **self.engine_options - ) - - @retry - def _store_result(self, task_id, result, status, - traceback=None, max_retries=3, **kwargs): - """Store return value and status of an executed task.""" - session = self.ResultSession() - with session_cleanup(session): - task = list(session.query(Task).filter(Task.task_id == task_id)) - task = task and task[0] - if not task: - task = Task(task_id) - session.add(task) - session.flush() - task.result = result - task.status = status - task.traceback = traceback - session.commit() - return result - - @retry - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - session = self.ResultSession() - with session_cleanup(session): - task = list(session.query(Task).filter(Task.task_id == task_id)) - task = task and task[0] - if not task: - task = Task(task_id) - task.status = states.PENDING - task.result = None - return self.meta_from_decoded(task.to_dict()) - - @retry - def _save_group(self, group_id, result): - """Store the result of an executed group.""" - session = self.ResultSession() - with session_cleanup(session): - group = TaskSet(group_id, result) - session.add(group) - session.flush() - session.commit() - return result - - @retry - def _restore_group(self, group_id): - """Get metadata for group by id.""" - session = self.ResultSession() - with session_cleanup(session): - group = session.query(TaskSet).filter( - TaskSet.taskset_id == group_id).first() - if group: - return group.to_dict() - - @retry - def _delete_group(self, group_id): - """Delete metadata for group by id.""" - session = self.ResultSession() - with session_cleanup(session): - session.query(TaskSet).filter( - TaskSet.taskset_id == group_id).delete() - session.flush() - session.commit() - - @retry - def _forget(self, task_id): - """Forget about result.""" - session = self.ResultSession() - with session_cleanup(session): - session.query(Task).filter(Task.task_id == task_id).delete() - session.commit() - - def cleanup(self): - """Delete expired metadata.""" - session = self.ResultSession() - expires = self.expires - now = self.app.now() - with session_cleanup(session): - session.query(Task).filter( - Task.date_done < (now - expires)).delete() - session.query(TaskSet).filter( - TaskSet.date_done < (now - expires)).delete() - session.commit() - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - dict(dburi=self.url, - expires=self.expires, - engine_options=self.engine_options)) - return super(DatabaseBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py deleted file mode 100644 index 2802a00..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database.models - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Database tables for the SQLAlchemy result store backend. 
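-
-    Editor's note (illustrative, not in the original file): this backend
-    is typically enabled with an SQLAlchemy URL, e.g.::
-
-        CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'
-
-    and the two table names defined below can be remapped via::
-
-        CELERY_RESULT_DB_TABLENAMES = {'task': 'myapp_taskmeta',
-                                       'group': 'myapp_tasksetmeta'}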
- -""" -from __future__ import absolute_import - -from datetime import datetime - -import sqlalchemy as sa -from sqlalchemy.types import PickleType - -from celery import states - -from .session import ResultModelBase - -__all__ = ['Task', 'TaskSet'] - - -class Task(ResultModelBase): - """Task result/status.""" - __tablename__ = 'celery_taskmeta' - __table_args__ = {'sqlite_autoincrement': True} - - id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'), - primary_key=True, - autoincrement=True) - task_id = sa.Column(sa.String(255), unique=True) - status = sa.Column(sa.String(50), default=states.PENDING) - result = sa.Column(PickleType, nullable=True) - date_done = sa.Column(sa.DateTime, default=datetime.utcnow, - onupdate=datetime.utcnow, nullable=True) - traceback = sa.Column(sa.Text, nullable=True) - - def __init__(self, task_id): - self.task_id = task_id - - def to_dict(self): - return {'task_id': self.task_id, - 'status': self.status, - 'result': self.result, - 'traceback': self.traceback, - 'date_done': self.date_done} - - def __repr__(self): - return ''.format(self) - - -class TaskSet(ResultModelBase): - """TaskSet result""" - __tablename__ = 'celery_tasksetmeta' - __table_args__ = {'sqlite_autoincrement': True} - - id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'), - autoincrement=True, primary_key=True) - taskset_id = sa.Column(sa.String(255), unique=True) - result = sa.Column(PickleType, nullable=True) - date_done = sa.Column(sa.DateTime, default=datetime.utcnow, - nullable=True) - - def __init__(self, taskset_id, result): - self.taskset_id = taskset_id - self.result = result - - def to_dict(self): - return {'taskset_id': self.taskset_id, - 'result': self.result, - 'date_done': self.date_done} - - def __repr__(self): - return ''.format(self) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py deleted file mode 100644 index 1575d7f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database.session - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - SQLAlchemy sessions. 
- -""" -from __future__ import absolute_import - -from billiard.util import register_after_fork - -from sqlalchemy import create_engine -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker -from sqlalchemy.pool import NullPool - -ResultModelBase = declarative_base() - -__all__ = ['SessionManager'] - - -class SessionManager(object): - def __init__(self): - self._engines = {} - self._sessions = {} - self.forked = False - self.prepared = False - register_after_fork(self, self._after_fork) - - def _after_fork(self,): - self.forked = True - - def get_engine(self, dburi, **kwargs): - if self.forked: - try: - return self._engines[dburi] - except KeyError: - engine = self._engines[dburi] = create_engine(dburi, **kwargs) - return engine - else: - kwargs['poolclass'] = NullPool - return create_engine(dburi, **kwargs) - - def create_session(self, dburi, short_lived_sessions=False, **kwargs): - engine = self.get_engine(dburi, **kwargs) - if self.forked: - if short_lived_sessions or dburi not in self._sessions: - self._sessions[dburi] = sessionmaker(bind=engine) - return engine, self._sessions[dburi] - else: - return engine, sessionmaker(bind=engine) - - def prepare_models(self, engine): - if not self.prepared: - ResultModelBase.metadata.create_all(engine) - self.prepared = True - - def session_factory(self, dburi, **kwargs): - engine, session = self.create_session(dburi, **kwargs) - self.prepare_models(engine) - return session() diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py deleted file mode 100644 index 281c38c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.mongodb - ~~~~~~~~~~~~~~~~~~~~~~~ - - MongoDB result store backend. - -""" -from __future__ import absolute_import - -from datetime import datetime - -from kombu.syn import detect_environment -from kombu.utils import cached_property -from kombu.utils.url import maybe_sanitize_url - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.five import items, string_t -from celery.utils.timeutils import maybe_timedelta - -from .base import BaseBackend - -try: - import pymongo -except ImportError: # pragma: no cover - pymongo = None # noqa - -if pymongo: - try: - from bson.binary import Binary - except ImportError: # pragma: no cover - from pymongo.binary import Binary # noqa -else: # pragma: no cover - Binary = None # noqa - -__all__ = ['MongoBackend'] - - -class MongoBackend(BaseBackend): - """MongoDB result backend. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`pymongo` is not available. 
- - """ - - host = 'localhost' - port = 27017 - user = None - password = None - database_name = 'celery' - taskmeta_collection = 'celery_taskmeta' - max_pool_size = 10 - options = None - - supports_autoexpire = False - - _connection = None - - def __init__(self, app=None, url=None, **kwargs): - self.options = {} - super(MongoBackend, self).__init__(app, **kwargs) - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if not pymongo: - raise ImproperlyConfigured( - 'You need to install the pymongo library to use the ' - 'MongoDB backend.') - - config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS') - if config is not None: - if not isinstance(config, dict): - raise ImproperlyConfigured( - 'MongoDB backend settings should be grouped in a dict') - config = dict(config) # do not modify original - - self.host = config.pop('host', self.host) - self.port = int(config.pop('port', self.port)) - self.user = config.pop('user', self.user) - self.password = config.pop('password', self.password) - self.database_name = config.pop('database', self.database_name) - self.taskmeta_collection = config.pop( - 'taskmeta_collection', self.taskmeta_collection, - ) - - self.options = dict(config, **config.pop('options', None) or {}) - - # Set option defaults - for key, value in items(self._prepare_client_options()): - self.options.setdefault(key, value) - - self.url = url - if self.url: - # Specifying backend as an URL - self.host = self.url - - def _prepare_client_options(self): - if pymongo.version_tuple >= (3, ): - return {'maxPoolSize': self.max_pool_size} - else: # pragma: no cover - options = { - 'max_pool_size': self.max_pool_size, - 'auto_start_request': False - } - if detect_environment() != 'default': - options['use_greenlets'] = True - return options - - def _get_connection(self): - """Connect to the MongoDB server.""" - if self._connection is None: - from pymongo import MongoClient - - # The first pymongo.Connection() argument (host) can be - # a list of ['host:port'] elements or a mongodb connection - # URI. If this is the case, don't use self.port - # but let pymongo get the port(s) from the URI instead. - # This enables the use of replica sets and sharding. - # See pymongo.Connection() for more info. 
- url = self.host - if isinstance(url, string_t) \ - and not url.startswith('mongodb://'): - url = 'mongodb://{0}:{1}'.format(url, self.port) - if url == 'mongodb://': - url = url + 'localhost' - self._connection = MongoClient(host=url, **self.options) - - return self._connection - - def process_cleanup(self): - if self._connection is not None: - # MongoDB connection will be closed automatically when object - # goes out of scope - del(self.collection) - del(self.database) - self._connection = None - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Store return value and status of an executed task.""" - meta = {'_id': task_id, - 'status': status, - 'result': Binary(self.encode(result)), - 'date_done': datetime.utcnow(), - 'traceback': Binary(self.encode(traceback)), - 'children': Binary(self.encode( - self.current_task_children(request), - ))} - self.collection.save(meta) - - return result - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - - obj = self.collection.find_one({'_id': task_id}) - if not obj: - return {'status': states.PENDING, 'result': None} - - meta = { - 'task_id': obj['_id'], - 'status': obj['status'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - 'traceback': self.decode(obj['traceback']), - 'children': self.decode(obj['children']), - } - - return meta - - def _save_group(self, group_id, result): - """Save the group result.""" - meta = {'_id': group_id, - 'result': Binary(self.encode(result)), - 'date_done': datetime.utcnow()} - self.collection.save(meta) - - return result - - def _restore_group(self, group_id): - """Get the result for a group by id.""" - obj = self.collection.find_one({'_id': group_id}) - if not obj: - return - - meta = { - 'task_id': obj['_id'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - } - - return meta - - def _delete_group(self, group_id): - """Delete a group by id.""" - self.collection.remove({'_id': group_id}) - - def _forget(self, task_id): - """Remove result from MongoDB. - - :raises celery.exceptions.OperationsError: - if the task_id could not be removed. - - """ - # By using safe=True, this will wait until it receives a response from - # the server. Likewise, it will raise an OperationsError if the - # response was unable to be completed. - self.collection.remove({'_id': task_id}) - - def cleanup(self): - """Delete expired metadata.""" - self.collection.remove( - {'date_done': {'$lt': self.app.now() - self.expires}}, - ) - - def __reduce__(self, args=(), kwargs={}): - return super(MongoBackend, self).__reduce__( - args, dict(kwargs, expires=self.expires, url=self.url), - ) - - def _get_database(self): - conn = self._get_connection() - db = conn[self.database_name] - if self.user and self.password: - if not db.authenticate(self.user, - self.password): - raise ImproperlyConfigured( - 'Invalid MongoDB username or password.') - return db - - @cached_property - def database(self): - """Get database from MongoDB connection and perform authentication - if necessary.""" - return self._get_database() - - @cached_property - def collection(self): - """Get the metadata task collection.""" - collection = self.database[self.taskmeta_collection] - - # Ensure an index on date_done is there, if not process the index - # in the background. 
Once completed cleanup will be much faster - collection.ensure_index('date_done', background='true') - return collection - - def as_uri(self, include_password=False): - """Return the backend as an URI. - - :keyword include_password: Censor passwords. - - """ - if not self.url: - return 'mongodb://' - if include_password: - return self.url - - if ',' not in self.url: - return maybe_sanitize_url(self.url) - - uri1, remainder = self.url.split(',', 1) - return ','.join([maybe_sanitize_url(uri1), remainder]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py b/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py deleted file mode 100644 index 1e838c1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.redis - ~~~~~~~~~~~~~~~~~~~~~ - - Redis result store backend. - -""" -from __future__ import absolute_import - -from functools import partial - -from kombu.utils import cached_property, retry_over_time -from kombu.utils.url import _parse_url - -from celery import states -from celery.canvas import maybe_signature -from celery.exceptions import ChordError, ImproperlyConfigured -from celery.five import string_t -from celery.utils import deprecated_property, strtobool -from celery.utils.functional import dictfilter -from celery.utils.log import get_logger -from celery.utils.timeutils import humanize_seconds - -from .base import KeyValueStoreBackend - -try: - import redis - from redis.exceptions import ConnectionError - from kombu.transport.redis import get_redis_error_classes -except ImportError: # pragma: no cover - redis = None # noqa - ConnectionError = None # noqa - get_redis_error_classes = None # noqa - -__all__ = ['RedisBackend'] - -REDIS_MISSING = """\ -You need to install the redis library in order to use \ -the Redis result store backend.""" - -logger = get_logger(__name__) -error = logger.error - - -class RedisBackend(KeyValueStoreBackend): - """Redis task result store.""" - - #: redis-py client module. - redis = redis - - #: Maximium number of connections in the pool. - max_connections = None - - supports_autoexpire = True - supports_native_join = True - implements_incr = True - - def __init__(self, host=None, port=None, db=None, password=None, - expires=None, max_connections=None, url=None, - connection_pool=None, new_join=False, **kwargs): - super(RedisBackend, self).__init__(**kwargs) - conf = self.app.conf - if self.redis is None: - raise ImproperlyConfigured(REDIS_MISSING) - self._client_capabilities = self._detect_client_capabilities() - - # For compatibility with the old REDIS_* configuration keys. 
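
[editor's note] The _get helper defined just below resolves each setting by
trying the CELERY_REDIS_* spelling first, then the bare REDIS_* spelling, so
both of these (illustrative) forms are honoured::

    CELERY_REDIS_HOST = 'localhost'   # preferred, new-style key
    REDIS_HOST = 'localhost'          # legacy fallback
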
- def _get(key): - for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}': - try: - return conf[prefix.format(key)] - except KeyError: - pass - if host and '://' in host: - url = host - host = None - - self.max_connections = ( - max_connections or _get('MAX_CONNECTIONS') or self.max_connections - ) - self._ConnectionPool = connection_pool - - self.connparams = { - 'host': _get('HOST') or 'localhost', - 'port': _get('PORT') or 6379, - 'db': _get('DB') or 0, - 'password': _get('PASSWORD'), - 'max_connections': self.max_connections, - } - if url: - self.connparams = self._params_from_url(url, self.connparams) - self.url = url - self.expires = self.prepare_expires(expires, type=int) - - try: - new_join = strtobool(self.connparams.pop('new_join')) - except KeyError: - pass - if new_join: - self.apply_chord = self._new_chord_apply - self.on_chord_part_return = self._new_chord_return - - self.connection_errors, self.channel_errors = ( - get_redis_error_classes() if get_redis_error_classes - else ((), ())) - - def _params_from_url(self, url, defaults): - scheme, host, port, user, password, path, query = _parse_url(url) - connparams = dict( - defaults, **dictfilter({ - 'host': host, 'port': port, 'password': password, - 'db': query.pop('virtual_host', None)}) - ) - - if scheme == 'socket': - # use 'path' as path to the socket… in this case - # the database number should be given in 'query' - connparams.update({ - 'connection_class': self.redis.UnixDomainSocketConnection, - 'path': '/' + path, - }) - # host+port are invalid options when using this connection type. - connparams.pop('host', None) - connparams.pop('port', None) - else: - connparams['db'] = path - - # db may be string and start with / like in kombu. - db = connparams.get('db') or 0 - db = db.strip('/') if isinstance(db, string_t) else db - connparams['db'] = int(db) - - # Query parameters override other parameters - connparams.update(query) - return connparams - - def get(self, key): - return self.client.get(key) - - def mget(self, keys): - return self.client.mget(keys) - - def ensure(self, fun, args, **policy): - retry_policy = dict(self.retry_policy, **policy) - max_retries = retry_policy.get('max_retries') - return retry_over_time( - fun, self.connection_errors, args, {}, - partial(self.on_connection_error, max_retries), - **retry_policy - ) - - def on_connection_error(self, max_retries, exc, intervals, retries): - tts = next(intervals) - error('Connection to Redis lost: Retry (%s/%s) %s.', - retries, max_retries or 'Inf', - humanize_seconds(tts, 'in ')) - return tts - - def set(self, key, value, **retry_policy): - return self.ensure(self._set, (key, value), **retry_policy) - - def _set(self, key, value): - with self.client.pipeline() as pipe: - if self.expires: - pipe.setex(key, value, self.expires) - else: - pipe.set(key, value) - pipe.publish(key, value) - pipe.execute() - - def delete(self, key): - self.client.delete(key) - - def incr(self, key): - return self.client.incr(key) - - def expire(self, key, value): - return self.client.expire(key, value) - - def _unpack_chord_result(self, tup, decode, - EXCEPTION_STATES=states.EXCEPTION_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES): - _, tid, state, retval = decode(tup) - if state in EXCEPTION_STATES: - retval = self.exception_to_python(retval) - if state in PROPAGATE_STATES: - raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval)) - return retval - - def _new_chord_apply(self, header, partial_args, group_id, body, - result=None, **options): - # avoids saving the group in 
the redis db. - return header(*partial_args, task_id=group_id) - - def _new_chord_return(self, task, state, result, propagate=None, - PROPAGATE_STATES=states.PROPAGATE_STATES): - app = self.app - if propagate is None: - propagate = self.app.conf.CELERY_CHORD_PROPAGATES - request = task.request - tid, gid = request.id, request.group - if not gid or not tid: - return - - client = self.client - jkey = self.get_key_for_group(gid, '.j') - result = self.encode_result(result, state) - with client.pipeline() as pipe: - _, readycount, _ = pipe \ - .rpush(jkey, self.encode([1, tid, state, result])) \ - .llen(jkey) \ - .expire(jkey, 86400) \ - .execute() - - try: - callback = maybe_signature(request.chord, app=app) - total = callback['chord_size'] - if readycount == total: - decode, unpack = self.decode, self._unpack_chord_result - with client.pipeline() as pipe: - resl, _, = pipe \ - .lrange(jkey, 0, total) \ - .delete(jkey) \ - .execute() - try: - callback.delay([unpack(tup, decode) for tup in resl]) - except Exception as exc: - error('Chord callback for %r raised: %r', - request.group, exc, exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('Callback error: {0!r}'.format(exc)), - ) - except ChordError as exc: - error('Chord %r raised: %r', request.group, exc, exc_info=1) - return self.chord_error_from_stack(callback, exc) - except Exception as exc: - error('Chord %r raised: %r', request.group, exc, exc_info=1) - return self.chord_error_from_stack( - callback, ChordError('Join error: {0!r}'.format(exc)), - ) - - def _detect_client_capabilities(self, socket_connect_timeout=False): - if self.redis.VERSION < (2, 4, 4): - raise ImproperlyConfigured( - 'Redis backend requires redis-py versions 2.4.4 or later. ' - 'You have {0.__version__}'.format(redis)) - if self.redis.VERSION >= (2, 10): - socket_connect_timeout = True - return {'socket_connect_timeout': socket_connect_timeout} - - def _create_client(self, socket_timeout=None, socket_connect_timeout=None, - **params): - return self._new_redis_client( - socket_timeout=socket_timeout and float(socket_timeout), - socket_connect_timeout=socket_connect_timeout and float( - socket_connect_timeout), **params - ) - - def _new_redis_client(self, **params): - if not self._client_capabilities['socket_connect_timeout']: - params.pop('socket_connect_timeout', None) - return self.redis.Redis(connection_pool=self.ConnectionPool(**params)) - - @property - def ConnectionPool(self): - if self._ConnectionPool is None: - self._ConnectionPool = self.redis.ConnectionPool - return self._ConnectionPool - - @cached_property - def client(self): - return self._create_client(**self.connparams) - - def __reduce__(self, args=(), kwargs={}): - return super(RedisBackend, self).__reduce__( - (self.url, ), {'expires': self.expires}, - ) - - @deprecated_property(3.2, 3.3) - def host(self): - return self.connparams['host'] - - @deprecated_property(3.2, 3.3) - def port(self): - return self.connparams['port'] - - @deprecated_property(3.2, 3.3) - def db(self): - return self.connparams['db'] - - @deprecated_property(3.2, 3.3) - def password(self): - return self.connparams['password'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py b/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py deleted file mode 100644 index 92bcc61..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.rpc - ~~~~~~~~~~~~~~~~~~~ - - RPC-style result backend, 
using reply-to and one queue per client. - -""" -from __future__ import absolute_import - -from kombu import Consumer, Exchange -from kombu.common import maybe_declare -from kombu.utils import cached_property - -from celery import current_task -from celery.backends import amqp - -__all__ = ['RPCBackend'] - - -class RPCBackend(amqp.AMQPBackend): - persistent = False - - class Consumer(Consumer): - auto_declare = False - - def _create_exchange(self, name, type='direct', delivery_mode=2): - # uses direct to queue routing (anon exchange). - return Exchange(None) - - def on_task_call(self, producer, task_id): - maybe_declare(self.binding(producer.channel), retry=True) - - def _create_binding(self, task_id): - return self.binding - - def _many_bindings(self, ids): - return [self.binding] - - def rkey(self, task_id): - return task_id - - def destination_for(self, task_id, request): - # Request is a new argument for backends, so must still support - # old code that rely on current_task - try: - request = request or current_task.request - except AttributeError: - raise RuntimeError( - 'RPC backend missing task request for {0!r}'.format(task_id), - ) - return request.reply_to, request.correlation_id or task_id - - def on_reply_declare(self, task_id): - pass - - def as_uri(self, include_password=True): - return 'rpc://' - - @property - def binding(self): - return self.Queue(self.oid, self.exchange, self.oid, - durable=False, auto_delete=False) - - @cached_property - def oid(self): - return self.app.oid diff --git a/thesisenv/lib/python3.6/site-packages/celery/beat.py b/thesisenv/lib/python3.6/site-packages/celery/beat.py deleted file mode 100644 index 368a903..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/beat.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.beat - ~~~~~~~~~~~ - - The periodic task scheduler. - -""" -from __future__ import absolute_import - -import errno -import os -import time -import shelve -import sys -import traceback - -from threading import Event, Thread - -from billiard import ensure_multiprocessing -from billiard.process import Process -from billiard.common import reset_signals -from kombu.utils import cached_property, reprcall -from kombu.utils.functional import maybe_evaluate - -from . import __version__ -from . import platforms -from . import signals -from .five import items, reraise, values, monotonic -from .schedules import maybe_schedule, crontab -from .utils.imports import instantiate -from .utils.timeutils import humanize_seconds -from .utils.log import get_logger, iter_open_logger_fds - -__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler', - 'PersistentScheduler', 'Service', 'EmbeddedService'] - -logger = get_logger(__name__) -debug, info, error, warning = (logger.debug, logger.info, - logger.error, logger.warning) - -DEFAULT_MAX_INTERVAL = 300 # 5 minutes - - -class SchedulingError(Exception): - """An error occured while scheduling a task.""" - - -class ScheduleEntry(object): - """An entry in the scheduler. - - :keyword name: see :attr:`name`. - :keyword schedule: see :attr:`schedule`. - :keyword args: see :attr:`args`. - :keyword kwargs: see :attr:`kwargs`. - :keyword options: see :attr:`options`. - :keyword last_run_at: see :attr:`last_run_at`. - :keyword total_run_count: see :attr:`total_run_count`. - :keyword relative: Is the time relative to when the server starts? - - """ - - #: The task name - name = None - - #: The schedule (run_every/crontab) - schedule = None - - #: Positional arguments to apply. 
-
-    args = None
-
-    #: Keyword arguments to apply.
-    kwargs = None
-
-    #: Task execution options.
-    options = None
-
-    #: The time and date of when this task was last scheduled.
-    last_run_at = None
-
-    #: Total number of times this task has been scheduled.
-    total_run_count = 0
-
-    def __init__(self, name=None, task=None, last_run_at=None,
-                 total_run_count=None, schedule=None, args=(), kwargs={},
-                 options={}, relative=False, app=None):
-        self.app = app
-        self.name = name
-        self.task = task
-        self.args = args
-        self.kwargs = kwargs
-        self.options = options
-        self.schedule = maybe_schedule(schedule, relative, app=self.app)
-        self.last_run_at = last_run_at or self._default_now()
-        self.total_run_count = total_run_count or 0
-
-    def _default_now(self):
-        return self.schedule.now() if self.schedule else self.app.now()
-
-    def _next_instance(self, last_run_at=None):
-        """Return a new instance of the same class, but with
-        its date and count fields updated."""
-        return self.__class__(**dict(
-            self,
-            last_run_at=last_run_at or self._default_now(),
-            total_run_count=self.total_run_count + 1,
-        ))
-    __next__ = next = _next_instance  # for 2to3
-
-    def __reduce__(self):
-        return self.__class__, (
-            self.name, self.task, self.last_run_at, self.total_run_count,
-            self.schedule, self.args, self.kwargs, self.options,
-        )
-
-    def update(self, other):
-        """Update values from another entry.
-
-        Does only update "editable" fields (task, schedule, args, kwargs,
-        options).
-
-        """
-        self.__dict__.update({'task': other.task, 'schedule': other.schedule,
-                              'args': other.args, 'kwargs': other.kwargs,
-                              'options': other.options})
-
-    def is_due(self):
-        """See :meth:`~celery.schedule.schedule.is_due`."""
-        return self.schedule.is_due(self.last_run_at)
-
-    def __iter__(self):
-        return iter(items(vars(self)))
-
-    def __repr__(self):
-        return '<Entry: {0.name} {call} {0.schedule}'.format(
-            self,
-            call=reprcall(self.task, self.args or (), self.kwargs or {}),
-        )
-
-
-class Scheduler(object):
-    """Scheduler for periodic tasks."""
-
-    Entry = ScheduleEntry
-
-    [... lines lost in extraction: the remaining Scheduler class header and
-     methods up to maybe_due() are missing from this diff ...]
-
-    def maybe_due(self, entry, publisher=None):
-        is_due, next_time_to_run = entry.is_due()
-        if is_due:
-            info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
-            try:
-                result = self.apply_async(entry, publisher=publisher)
-            except Exception as exc:
-                error('Message Error: %s\n%s',
-                      exc, traceback.format_stack(), exc_info=True)
-            else:
-                debug('%s sent. id->%s', entry.task, result.id)
-        return next_time_to_run
-
-    def tick(self):
-        """Run a tick, that is one iteration of the scheduler.
-
-        Executes all due tasks.
-
-        """
-        remaining_times = []
-        try:
-            for entry in values(self.schedule):
-                next_time_to_run = self.maybe_due(entry, self.publisher)
-                if next_time_to_run:
-                    remaining_times.append(next_time_to_run)
-        except RuntimeError:
-            pass
-
-        return min(remaining_times + [self.max_interval])
-
-    def should_sync(self):
-        return (
-            (not self._last_sync or
-               (monotonic() - self._last_sync) > self.sync_every) or
-            (self.sync_every_tasks and
-             self._tasks_since_sync >= self.sync_every_tasks)
-        )
-
-    def reserve(self, entry):
-        new_entry = self.schedule[entry.name] = next(entry)
-        return new_entry
-
-    def apply_async(self, entry, publisher=None, **kwargs):
-        # Update timestamps and run counts before we actually execute,
-        # so we have that done if an exception is raised (doesn't schedule
-        # forever.)
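
[editor's note] Illustrative sketch of the dict-style configuration the
scheduler consumes (merged in via update_from_dict/merge_inplace below);
names and timings are assumptions::

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'add-every-monday-morning': {
            'task': 'tasks.add',
            'schedule': crontab(hour=7, minute=30, day_of_week=1),
            'args': (16, 16),
        },
    }
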
- entry = self.reserve(entry) - task = self.app.tasks.get(entry.task) - - try: - if task: - result = task.apply_async(entry.args, entry.kwargs, - publisher=publisher, - **entry.options) - else: - result = self.send_task(entry.task, entry.args, entry.kwargs, - publisher=publisher, - **entry.options) - except Exception as exc: - reraise(SchedulingError, SchedulingError( - "Couldn't apply scheduled task {0.name}: {exc}".format( - entry, exc=exc)), sys.exc_info()[2]) - finally: - self._tasks_since_sync += 1 - if self.should_sync(): - self._do_sync() - return result - - def send_task(self, *args, **kwargs): - return self.app.send_task(*args, **kwargs) - - def setup_schedule(self): - self.install_default_entries(self.data) - - def _do_sync(self): - try: - debug('beat: Synchronizing schedule...') - self.sync() - finally: - self._last_sync = monotonic() - self._tasks_since_sync = 0 - - def sync(self): - pass - - def close(self): - self.sync() - - def add(self, **kwargs): - entry = self.Entry(app=self.app, **kwargs) - self.schedule[entry.name] = entry - return entry - - def _maybe_entry(self, name, entry): - if isinstance(entry, self.Entry): - entry.app = self.app - return entry - return self.Entry(**dict(entry, name=name, app=self.app)) - - def update_from_dict(self, dict_): - self.schedule.update(dict( - (name, self._maybe_entry(name, entry)) - for name, entry in items(dict_))) - - def merge_inplace(self, b): - schedule = self.schedule - A, B = set(schedule), set(b) - - # Remove items from disk not in the schedule anymore. - for key in A ^ B: - schedule.pop(key, None) - - # Update and add new items in the schedule - for key in B: - entry = self.Entry(**dict(b[key], name=key, app=self.app)) - if schedule.get(key): - schedule[key].update(entry) - else: - schedule[key] = entry - - def _ensure_connected(self): - # callback called for each retry while the connection - # can't be established. - def _error_handler(exc, interval): - error('beat: Connection error: %s. ' - 'Trying again in %s seconds...', exc, interval) - - return self.connection.ensure_connection( - _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES - ) - - def get_schedule(self): - return self.data - - def set_schedule(self, schedule): - self.data = schedule - schedule = property(get_schedule, set_schedule) - - @cached_property - def connection(self): - return self.app.connection() - - @cached_property - def publisher(self): - return self.Publisher(self._ensure_connected()) - - @property - def info(self): - return '' - - -class PersistentScheduler(Scheduler): - persistence = shelve - known_suffixes = ('', '.db', '.dat', '.bak', '.dir') - - _store = None - - def __init__(self, *args, **kwargs): - self.schedule_filename = kwargs.get('schedule_filename') - Scheduler.__init__(self, *args, **kwargs) - - def _remove_db(self): - for suffix in self.known_suffixes: - with platforms.ignore_errno(errno.ENOENT): - os.remove(self.schedule_filename + suffix) - - def _open_schedule(self): - return self.persistence.open(self.schedule_filename, writeback=True) - - def _destroy_open_corrupted_schedule(self, exc): - error('Removing corrupted schedule file %r: %r', - self.schedule_filename, exc, exc_info=True) - self._remove_db() - return self._open_schedule() - - def setup_schedule(self): - try: - self._store = self._open_schedule() - # In some cases there may be different errors from a storage - # backend for corrupted files. Example - DBPageNotFoundError - # exception from bsddb. 
In such case the file will be - # successfully opened but the error will be raised on first key - # retrieving. - self._store.keys() - except Exception as exc: - self._store = self._destroy_open_corrupted_schedule(exc) - - for _ in (1, 2): - try: - self._store['entries'] - except KeyError: - # new schedule db - try: - self._store['entries'] = {} - except KeyError as exc: - self._store = self._destroy_open_corrupted_schedule(exc) - continue - else: - if '__version__' not in self._store: - warning('DB Reset: Account for new __version__ field') - self._store.clear() # remove schedule at 2.2.2 upgrade. - elif 'tz' not in self._store: - warning('DB Reset: Account for new tz field') - self._store.clear() # remove schedule at 3.0.8 upgrade - elif 'utc_enabled' not in self._store: - warning('DB Reset: Account for new utc_enabled field') - self._store.clear() # remove schedule at 3.0.9 upgrade - break - - tz = self.app.conf.CELERY_TIMEZONE - stored_tz = self._store.get('tz') - if stored_tz is not None and stored_tz != tz: - warning('Reset: Timezone changed from %r to %r', stored_tz, tz) - self._store.clear() # Timezone changed, reset db! - utc = self.app.conf.CELERY_ENABLE_UTC - stored_utc = self._store.get('utc_enabled') - if stored_utc is not None and stored_utc != utc: - choices = {True: 'enabled', False: 'disabled'} - warning('Reset: UTC changed from %s to %s', - choices[stored_utc], choices[utc]) - self._store.clear() # UTC setting changed, reset db! - entries = self._store.setdefault('entries', {}) - self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE) - self.install_default_entries(self.schedule) - self._store.update(__version__=__version__, tz=tz, utc_enabled=utc) - self.sync() - debug('Current schedule:\n' + '\n'.join( - repr(entry) for entry in values(entries))) - - def get_schedule(self): - return self._store['entries'] - - def set_schedule(self, schedule): - self._store['entries'] = schedule - schedule = property(get_schedule, set_schedule) - - def sync(self): - if self._store is not None: - self._store.sync() - - def close(self): - self.sync() - self._store.close() - - @property - def info(self): - return ' . 
db -> {self.schedule_filename}'.format(self=self) - - -class Service(object): - scheduler_cls = PersistentScheduler - - def __init__(self, app, max_interval=None, schedule_filename=None, - scheduler_cls=None): - self.app = app - self.max_interval = (max_interval or - app.conf.CELERYBEAT_MAX_LOOP_INTERVAL) - self.scheduler_cls = scheduler_cls or self.scheduler_cls - self.schedule_filename = ( - schedule_filename or app.conf.CELERYBEAT_SCHEDULE_FILENAME) - - self._is_shutdown = Event() - self._is_stopped = Event() - - def __reduce__(self): - return self.__class__, (self.max_interval, self.schedule_filename, - self.scheduler_cls, self.app) - - def start(self, embedded_process=False, drift=-0.010): - info('beat: Starting...') - debug('beat: Ticking with max interval->%s', - humanize_seconds(self.scheduler.max_interval)) - - signals.beat_init.send(sender=self) - if embedded_process: - signals.beat_embedded_init.send(sender=self) - platforms.set_process_title('celery beat') - - try: - while not self._is_shutdown.is_set(): - interval = self.scheduler.tick() - interval = interval + drift if interval else interval - if interval and interval > 0: - debug('beat: Waking up %s.', - humanize_seconds(interval, prefix='in ')) - time.sleep(interval) - if self.scheduler.should_sync(): - self.scheduler._do_sync() - except (KeyboardInterrupt, SystemExit): - self._is_shutdown.set() - finally: - self.sync() - - def sync(self): - self.scheduler.close() - self._is_stopped.set() - - def stop(self, wait=False): - info('beat: Shutting down...') - self._is_shutdown.set() - wait and self._is_stopped.wait() # block until shutdown done. - - def get_scheduler(self, lazy=False): - filename = self.schedule_filename - scheduler = instantiate(self.scheduler_cls, - app=self.app, - schedule_filename=filename, - max_interval=self.max_interval, - lazy=lazy) - return scheduler - - @cached_property - def scheduler(self): - return self.get_scheduler() - - -class _Threaded(Thread): - """Embedded task scheduler using threading.""" - - def __init__(self, app, **kwargs): - super(_Threaded, self).__init__() - self.app = app - self.service = Service(app, **kwargs) - self.daemon = True - self.name = 'Beat' - - def run(self): - self.app.set_current() - self.service.start() - - def stop(self): - self.service.stop(wait=True) - - -try: - ensure_multiprocessing() -except NotImplementedError: # pragma: no cover - _Process = None -else: - class _Process(Process): # noqa - - def __init__(self, app, **kwargs): - super(_Process, self).__init__() - self.app = app - self.service = Service(app, **kwargs) - self.name = 'Beat' - - def run(self): - reset_signals(full=False) - platforms.close_open_fds([ - sys.__stdin__, sys.__stdout__, sys.__stderr__, - ] + list(iter_open_logger_fds())) - self.app.set_default() - self.app.set_current() - self.service.start(embedded_process=True) - - def stop(self): - self.service.stop() - self.terminate() - - -def EmbeddedService(app, max_interval=None, **kwargs): - """Return embedded clock service. - - :keyword thread: Run threaded instead of as a separate process. - Uses :mod:`multiprocessing` by default, if available. - - """ - if kwargs.pop('thread', False) or _Process is None: - # Need short max interval to be able to stop thread - # in reasonable time. 
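
[editor's note] Minimal usage sketch for the EmbeddedService factory at this
point; `app` is an assumed Celery instance::

    beat = EmbeddedService(app, thread=True)  # threaded variant, 1s interval
    beat.start()
    # ... later, on shutdown:
    beat.stop()
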
- return _Threaded(app, max_interval=1, **kwargs) - return _Process(app, max_interval=max_interval, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py deleted file mode 100644 index 3f44b50..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from .base import Option - -__all__ = ['Option'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py deleted file mode 100644 index ce3b351..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -""" -The :program:`celery amqp` command. - -.. program:: celery amqp - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import cmd -import sys -import shlex -import pprint - -from functools import partial -from itertools import count - -from kombu.utils.encoding import safe_str - -from celery.utils.functional import padlist - -from celery.bin.base import Command -from celery.five import string_t -from celery.utils import strtobool - -__all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp'] - -# Map to coerce strings to other types. -COERCE = {bool: strtobool} - -HELP_HEADER = """ -Commands --------- -""".rstrip() - -EXAMPLE_TEXT = """ -Example: - -> queue.delete myqueue yes no -""" - -say = partial(print, file=sys.stderr) - - -class Spec(object): - """AMQP Command specification. - - Used to convert arguments to Python values and display various help - and tooltips. - - :param args: see :attr:`args`. - :keyword returns: see :attr:`returns`. - - .. attribute args:: - - List of arguments this command takes. Should - contain `(argument_name, argument_type)` tuples. - - .. attribute returns: - - Helpful human string representation of what this command returns. - May be :const:`None`, to signify the return type is unknown. - - """ - def __init__(self, *args, **kwargs): - self.args = args - self.returns = kwargs.get('returns') - - def coerce(self, index, value): - """Coerce value for argument at index.""" - arg_info = self.args[index] - arg_type = arg_info[1] - # Might be a custom way to coerce the string value, - # so look in the coercion map. - return COERCE.get(arg_type, arg_type)(value) - - def str_args_to_python(self, arglist): - """Process list of string arguments to values according to spec. - - e.g: - - >>> spec = Spec([('queue', str), ('if_unused', bool)]) - >>> spec.str_args_to_python('pobox', 'true') - ('pobox', True) - - """ - return tuple( - self.coerce(index, value) for index, value in enumerate(arglist)) - - def format_response(self, response): - """Format the return value of this command in a human-friendly way.""" - if not self.returns: - return 'ok.' if response is None else response - if callable(self.returns): - return self.returns(response) - return self.returns.format(response) - - def format_arg(self, name, type, default_value=None): - if default_value is not None: - return '{0}:{1}'.format(name, default_value) - return name - - def format_signature(self): - return ' '.join(self.format_arg(*padlist(list(arg), 3)) - for arg in self.args) - - -def dump_message(message): - if message is None: - return 'No messages in queue. basic.publish something.' 
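
[editor's note] Doctest-style sketch of the Spec coercion defined above;
'yes' passes through strtobool because the declared argument type is bool::

    >>> spec = Spec(('queue', str), ('if_unused', bool))
    >>> spec.str_args_to_python(['testqueue', 'yes'])
    ('testqueue', True)
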
- return {'body': message.body, - 'properties': message.properties, - 'delivery_info': message.delivery_info} - - -def format_declare_queue(ret): - return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret) - - -class AMQShell(cmd.Cmd): - """AMQP API Shell. - - :keyword connect: Function used to connect to the server, must return - connection object. - - :keyword silent: If :const:`True`, the commands won't have annoying - output not relevant when running in non-shell mode. - - - .. attribute: builtins - - Mapping of built-in command names -> method names - - .. attribute:: amqp - - Mapping of AMQP API commands and their :class:`Spec`. - - """ - conn = None - chan = None - prompt_fmt = '{self.counter}> ' - identchars = cmd.IDENTCHARS = '.' - needs_reconnect = False - counter = 1 - inc_counter = count(2) - - builtins = {'EOF': 'do_exit', - 'exit': 'do_exit', - 'help': 'do_help'} - - amqp = { - 'exchange.declare': Spec(('exchange', str), - ('type', str), - ('passive', bool, 'no'), - ('durable', bool, 'no'), - ('auto_delete', bool, 'no'), - ('internal', bool, 'no')), - 'exchange.delete': Spec(('exchange', str), - ('if_unused', bool)), - 'queue.bind': Spec(('queue', str), - ('exchange', str), - ('routing_key', str)), - 'queue.declare': Spec(('queue', str), - ('passive', bool, 'no'), - ('durable', bool, 'no'), - ('exclusive', bool, 'no'), - ('auto_delete', bool, 'no'), - returns=format_declare_queue), - 'queue.delete': Spec(('queue', str), - ('if_unused', bool, 'no'), - ('if_empty', bool, 'no'), - returns='ok. {0} messages deleted.'), - 'queue.purge': Spec(('queue', str), - returns='ok. {0} messages deleted.'), - 'basic.get': Spec(('queue', str), - ('no_ack', bool, 'off'), - returns=dump_message), - 'basic.publish': Spec(('msg', str), - ('exchange', str), - ('routing_key', str), - ('mandatory', bool, 'no'), - ('immediate', bool, 'no')), - 'basic.ack': Spec(('delivery_tag', int)), - } - - def _prepare_spec(self, conn): - # XXX Hack to fix Issue #2013 - from amqp import Connection, Message - if isinstance(conn.connection, Connection): - self.amqp['basic.publish'] = Spec(('msg', Message), - ('exchange', str), - ('routing_key', str), - ('mandatory', bool, 'no'), - ('immediate', bool, 'no')) - - def __init__(self, *args, **kwargs): - self.connect = kwargs.pop('connect') - self.silent = kwargs.pop('silent', False) - self.out = kwargs.pop('out', sys.stderr) - cmd.Cmd.__init__(self, *args, **kwargs) - self._reconnect() - - def note(self, m): - """Say something to the user. Disabled if :attr:`silent`.""" - if not self.silent: - say(m, file=self.out) - - def say(self, m): - say(m, file=self.out) - - def get_amqp_api_command(self, cmd, arglist): - """With a command name and a list of arguments, convert the arguments - to Python values and find the corresponding method on the AMQP channel - object. - - :returns: tuple of `(method, processed_args)`. 
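
[editor's note] An illustrative AMQShell session built from the command table
above; the queue and exchange names are assumptions::

    1> exchange.declare testexchange direct
    ok.
    2> queue.declare testqueue
    ok. queue:testqueue messages:0 consumers:0.
    3> queue.bind testqueue testexchange testkey
    ok.
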
- - """ - spec = self.amqp[cmd] - args = spec.str_args_to_python(arglist) - attr_name = cmd.replace('.', '_') - if self.needs_reconnect: - self._reconnect() - return getattr(self.chan, attr_name), args, spec.format_response - - def do_exit(self, *args): - """The `'exit'` command.""" - self.note("\n-> please, don't leave!") - sys.exit(0) - - def display_command_help(self, cmd, short=False): - spec = self.amqp[cmd] - self.say('{0} {1}'.format(cmd, spec.format_signature())) - - def do_help(self, *args): - if not args: - self.say(HELP_HEADER) - for cmd_name in self.amqp: - self.display_command_help(cmd_name, short=True) - self.say(EXAMPLE_TEXT) - else: - self.display_command_help(args[0]) - - def default(self, line): - self.say("unknown syntax: {0!r}. how about some 'help'?".format(line)) - - def get_names(self): - return set(self.builtins) | set(self.amqp) - - def completenames(self, text, *ignored): - """Return all commands starting with `text`, for tab-completion.""" - names = self.get_names() - first = [cmd for cmd in names - if cmd.startswith(text.replace('_', '.'))] - if first: - return first - return [cmd for cmd in names - if cmd.partition('.')[2].startswith(text)] - - def dispatch(self, cmd, argline): - """Dispatch and execute the command. - - Lookup order is: :attr:`builtins` -> :attr:`amqp`. - - """ - arglist = shlex.split(safe_str(argline)) - if cmd in self.builtins: - return getattr(self, self.builtins[cmd])(*arglist) - fun, args, formatter = self.get_amqp_api_command(cmd, arglist) - return formatter(fun(*args)) - - def parseline(self, line): - """Parse input line. - - :returns: tuple of three items: - `(command_name, arglist, original_line)` - - """ - parts = line.split() - if parts: - return parts[0], ' '.join(parts[1:]), line - return '', '', line - - def onecmd(self, line): - """Parse line and execute command.""" - cmd, arg, line = self.parseline(line) - if not line: - return self.emptyline() - self.lastcmd = line - self.counter = next(self.inc_counter) - try: - self.respond(self.dispatch(cmd, arg)) - except (AttributeError, KeyError) as exc: - self.default(line) - except Exception as exc: - self.say(exc) - self.needs_reconnect = True - - def respond(self, retval): - """What to do with the return value of a command.""" - if retval is not None: - if isinstance(retval, string_t): - self.say(retval) - else: - self.say(pprint.pformat(retval)) - - def _reconnect(self): - """Re-establish connection to the AMQP server.""" - self.conn = self.connect(self.conn) - self._prepare_spec(self.conn) - self.chan = self.conn.default_channel - self.needs_reconnect = False - - @property - def prompt(self): - return self.prompt_fmt.format(self=self) - - -class AMQPAdmin(object): - """The celery :program:`celery amqp` utility.""" - Shell = AMQShell - - def __init__(self, *args, **kwargs): - self.app = kwargs['app'] - self.out = kwargs.setdefault('out', sys.stderr) - self.silent = kwargs.get('silent') - self.args = args - - def connect(self, conn=None): - if conn: - conn.close() - conn = self.app.connection() - self.note('-> connecting to {0}.'.format(conn.as_uri())) - conn.connect() - self.note('-> connected.') - return conn - - def run(self): - shell = self.Shell(connect=self.connect, out=self.out) - if self.args: - return shell.onecmd(' '.join(self.args)) - try: - return shell.cmdloop() - except KeyboardInterrupt: - self.note('(bibi)') - pass - - def note(self, m): - if not self.silent: - say(m, file=self.out) - - -class amqp(Command): - """AMQP Administration Shell. 
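
[editor's note] AMQPAdmin (above) can also run one-shot commands instead of
the interactive loop; `app` is an assumed Celery instance::

    AMQPAdmin('queue.declare', 'testqueue', app=app).run()
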
- - Also works for non-amqp transports (but not ones that - store declarations in memory). - - Examples:: - - celery amqp - start shell mode - celery amqp help - show list of commands - - celery amqp exchange.delete name - celery amqp queue.delete queue - celery amqp queue.delete queue yes yes - - """ - - def run(self, *args, **options): - options['app'] = self.app - return AMQPAdmin(*args, **options).run() - - -def main(): - amqp().execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/base.py b/thesisenv/lib/python3.6/site-packages/celery/bin/base.py deleted file mode 100644 index 9044b7b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/base.py +++ /dev/null @@ -1,668 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -.. _preload-options: - -Preload Options ---------------- - -These options are supported by all commands, -and usually parsed before command-specific arguments. - -.. cmdoption:: -A, --app - - app instance to use (e.g. module.attr_name) - -.. cmdoption:: -b, --broker - - url to broker. default is 'amqp://guest@localhost//' - -.. cmdoption:: --loader - - name of custom loader class to use. - -.. cmdoption:: --config - - Name of the configuration module - -.. _daemon-options: - -Daemon Options --------------- - -These options are supported by commands that can detach -into the background (daemon). They will be present -in any command that also has a `--detach` option. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: --pidfile - - Optional file used to store the process pid. - - The program will not start if this file already exists - and the pid is still alive. - -.. cmdoption:: --uid - - User id, or user name of the user to run as after detaching. - -.. cmdoption:: --gid - - Group id, or group name of the main group to change to after - detaching. - -.. cmdoption:: --umask - - Effective umask (in octal) of the process after detaching. Inherits - the umask of the parent process by default. - -.. cmdoption:: --workdir - - Optional directory to change to after detaching. - -.. cmdoption:: --executable - - Executable to use for the detached process. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import os -import random -import re -import sys -import warnings -import json - -from collections import defaultdict -from heapq import heappush -from inspect import getargspec -from optparse import OptionParser, IndentedHelpFormatter, make_option as Option -from pprint import pformat - -from celery import VERSION_BANNER, Celery, maybe_patch_concurrency -from celery import signals -from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning -from celery.five import items, string, string_t -from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE -from celery.utils import term -from celery.utils import text -from celery.utils import node_format, host_format -from celery.utils.imports import symbol_by_name, import_from_cwd - -try: - input = raw_input -except NameError: - pass - -# always enable DeprecationWarnings, so our users can see them. -for warning in (CDeprecationWarning, CPendingDeprecationWarning): - warnings.simplefilter('once', warning, 0) - -ARGV_DISABLED = """ -Unrecognized command-line arguments: {0} - -Try --help? 
-""" - -find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') -find_rst_ref = re.compile(r':\w+:`(.+?)`') - -__all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter', - 'Command', 'Option', 'daemon_options'] - - -class Error(Exception): - status = EX_FAILURE - - def __init__(self, reason, status=None): - self.reason = reason - self.status = status if status is not None else self.status - super(Error, self).__init__(reason, status) - - def __str__(self): - return self.reason - __unicode__ = __str__ - - -class UsageError(Error): - status = EX_USAGE - - -class Extensions(object): - - def __init__(self, namespace, register): - self.names = [] - self.namespace = namespace - self.register = register - - def add(self, cls, name): - heappush(self.names, name) - self.register(cls, name=name) - - def load(self): - try: - from pkg_resources import iter_entry_points - except ImportError: # pragma: no cover - return - - for ep in iter_entry_points(self.namespace): - sym = ':'.join([ep.module_name, ep.attrs[0]]) - try: - cls = symbol_by_name(sym) - except (ImportError, SyntaxError) as exc: - warnings.warn( - 'Cannot load extension {0!r}: {1!r}'.format(sym, exc)) - else: - self.add(cls, ep.name) - return self.names - - -class HelpFormatter(IndentedHelpFormatter): - - def format_epilog(self, epilog): - if epilog: - return '\n{0}\n\n'.format(epilog) - return '' - - def format_description(self, description): - return text.ensure_2lines(text.fill_paragraphs( - text.dedent(description), self.width)) - - -class Command(object): - """Base class for command-line applications. - - :keyword app: The current app. - :keyword get_app: Callable returning the current app if no app provided. - - """ - Error = Error - UsageError = UsageError - Parser = OptionParser - - #: Arg list used in help. - args = '' - - #: Application version. - version = VERSION_BANNER - - #: If false the parser will raise an exception if positional - #: args are provided. - supports_args = True - - #: List of options (without preload options). - option_list = () - - # module Rst documentation to parse help from (if any) - doc = None - - # Some programs (multi) does not want to load the app specified - # (Issue #1008). - respects_app_option = True - - #: List of options to parse before parsing other options. - preload_options = ( - Option('-A', '--app', default=None), - Option('-b', '--broker', default=None), - Option('--loader', default=None), - Option('--config', default=None), - Option('--workdir', default=None, dest='working_directory'), - Option('--no-color', '-C', action='store_true', default=None), - Option('--quiet', '-q', action='store_true'), - ) - - #: Enable if the application should support config from the cmdline. - enable_config_from_cmdline = False - - #: Default configuration namespace. - namespace = 'celery' - - #: Text to print at end of --help - epilog = None - - #: Text to print in --help before option list. - description = '' - - #: Set to true if this command doesn't have subcommands - leaf = True - - # used by :meth:`say_remote_command_reply`. - show_body = True - # used by :meth:`say_chat`. 
- show_reply = True - - prog_name = 'celery' - - def __init__(self, app=None, get_app=None, no_color=False, - stdout=None, stderr=None, quiet=False, on_error=None, - on_usage_error=None): - self.app = app - self.get_app = get_app or self._get_default_app - self.stdout = stdout or sys.stdout - self.stderr = stderr or sys.stderr - self._colored = None - self._no_color = no_color - self.quiet = quiet - if not self.description: - self.description = self.__doc__ - if on_error: - self.on_error = on_error - if on_usage_error: - self.on_usage_error = on_usage_error - - def run(self, *args, **options): - """This is the body of the command called by :meth:`handle_argv`.""" - raise NotImplementedError('subclass responsibility') - - def on_error(self, exc): - self.error(self.colored.red('Error: {0}'.format(exc))) - - def on_usage_error(self, exc): - self.handle_error(exc) - - def on_concurrency_setup(self): - pass - - def __call__(self, *args, **kwargs): - random.seed() # maybe we were forked. - self.verify_args(args) - try: - ret = self.run(*args, **kwargs) - return ret if ret is not None else EX_OK - except self.UsageError as exc: - self.on_usage_error(exc) - return exc.status - except self.Error as exc: - self.on_error(exc) - return exc.status - - def verify_args(self, given, _index=0): - S = getargspec(self.run) - _index = 1 if S.args and S.args[0] == 'self' else _index - required = S.args[_index:-len(S.defaults) if S.defaults else None] - missing = required[len(given):] - if missing: - raise self.UsageError('Missing required {0}: {1}'.format( - text.pluralize(len(missing), 'argument'), - ', '.join(missing) - )) - - def execute_from_commandline(self, argv=None): - """Execute application from command-line. - - :keyword argv: The list of command-line arguments. - Defaults to ``sys.argv``. - - """ - if argv is None: - argv = list(sys.argv) - # Should we load any special concurrency environment? - self.maybe_patch_concurrency(argv) - self.on_concurrency_setup() - - # Dump version and exit if '--version' arg set. - self.early_version(argv) - argv = self.setup_app_from_commandline(argv) - self.prog_name = os.path.basename(argv[0]) - return self.handle_argv(self.prog_name, argv[1:]) - - def run_from_argv(self, prog_name, argv=None, command=None): - return self.handle_argv(prog_name, - sys.argv if argv is None else argv, command) - - def maybe_patch_concurrency(self, argv=None): - argv = argv or sys.argv - pool_option = self.with_pool_option(argv) - if pool_option: - maybe_patch_concurrency(argv, *pool_option) - short_opts, long_opts = pool_option - - def usage(self, command): - return '%prog {0} [options] {self.args}'.format(command, self=self) - - def get_options(self): - """Get supported command-line options.""" - return self.option_list - - def expanduser(self, value): - if isinstance(value, string_t): - return os.path.expanduser(value) - return value - - def ask(self, q, choices, default=None): - """Prompt user to choose from a tuple of string values. - - :param q: the question to ask (do not include questionark) - :param choice: tuple of possible choices, must be lowercase. - :param default: Default value if any. - - If a default is not specified the question will be repeated - until the user gives a valid choice. - - Matching is done case insensitively. - - """ - schoices = choices - if default is not None: - schoices = [c.upper() if c == default else c.lower() - for c in choices] - schoices = '/'.join(schoices) - - p = '{0} ({1})? 
'.format(q.capitalize(), schoices) - while 1: - val = input(p).lower() - if val in choices: - return val - elif default is not None: - break - return default - - def handle_argv(self, prog_name, argv, command=None): - """Parse command-line arguments from ``argv`` and dispatch - to :meth:`run`. - - :param prog_name: The program name (``argv[0]``). - :param argv: Command arguments. - - Exits with an error message if :attr:`supports_args` is disabled - and ``argv`` contains positional arguments. - - """ - options, args = self.prepare_args( - *self.parse_options(prog_name, argv, command)) - return self(*args, **options) - - def prepare_args(self, options, args): - if options: - options = dict((k, self.expanduser(v)) - for k, v in items(vars(options)) - if not k.startswith('_')) - args = [self.expanduser(arg) for arg in args] - self.check_args(args) - return options, args - - def check_args(self, args): - if not self.supports_args and args: - self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE) - - def error(self, s): - self.out(s, fh=self.stderr) - - def out(self, s, fh=None): - print(s, file=fh or self.stdout) - - def die(self, msg, status=EX_FAILURE): - self.error(msg) - sys.exit(status) - - def early_version(self, argv): - if '--version' in argv: - print(self.version, file=self.stdout) - sys.exit(0) - - def parse_options(self, prog_name, arguments, command=None): - """Parse the available options.""" - # Don't want to load configuration to just print the version, - # so we handle --version manually here. - self.parser = self.create_parser(prog_name, command) - return self.parser.parse_args(arguments) - - def create_parser(self, prog_name, command=None): - option_list = ( - self.preload_options + - self.get_options() + - tuple(self.app.user_options['preload']) - ) - return self.prepare_parser(self.Parser( - prog=prog_name, - usage=self.usage(command), - version=self.version, - epilog=self.epilog, - formatter=HelpFormatter(), - description=self.description, - option_list=option_list, - )) - - def prepare_parser(self, parser): - docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc] - for doc in docs: - for long_opt, help in items(doc): - option = parser.get_option(long_opt) - if option is not None: - option.help = ' '.join(help).format(default=option.default) - return parser - - def setup_app_from_commandline(self, argv): - preload_options = self.parse_preload_options(argv) - quiet = preload_options.get('quiet') - if quiet is not None: - self.quiet = quiet - try: - self.no_color = preload_options['no_color'] - except KeyError: - pass - workdir = preload_options.get('working_directory') - if workdir: - os.chdir(workdir) - app = (preload_options.get('app') or - os.environ.get('CELERY_APP') or - self.app) - preload_loader = preload_options.get('loader') - if preload_loader: - # Default app takes loader from this env (Issue #1066). 
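
[editor's note] Net effect of the preload handling around this point, with an
illustrative broker URL: a --broker passed on the command line is exported
for the app to pick up::

    # celery --broker=amqp://guest@localhost// ...  results in:
    os.environ['CELERY_BROKER_URL'] = 'amqp://guest@localhost//'
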
- os.environ['CELERY_LOADER'] = preload_loader - loader = (preload_loader, - os.environ.get('CELERY_LOADER') or - 'default') - broker = preload_options.get('broker', None) - if broker: - os.environ['CELERY_BROKER_URL'] = broker - config = preload_options.get('config') - if config: - os.environ['CELERY_CONFIG_MODULE'] = config - if self.respects_app_option: - if app: - self.app = self.find_app(app) - elif self.app is None: - self.app = self.get_app(loader=loader) - if self.enable_config_from_cmdline: - argv = self.process_cmdline_config(argv) - else: - self.app = Celery(fixups=[]) - - user_preload = tuple(self.app.user_options['preload'] or ()) - if user_preload: - user_options = self.preparse_options(argv, user_preload) - for user_option in user_preload: - user_options.setdefault(user_option.dest, user_option.default) - signals.user_preload_options.send( - sender=self, app=self.app, options=user_options, - ) - return argv - - def find_app(self, app): - from celery.app.utils import find_app - return find_app(app, symbol_by_name=self.symbol_by_name) - - def symbol_by_name(self, name, imp=import_from_cwd): - return symbol_by_name(name, imp=imp) - get_cls_by_name = symbol_by_name # XXX compat - - def process_cmdline_config(self, argv): - try: - cargs_start = argv.index('--') - except ValueError: - return argv - argv, cargs = argv[:cargs_start], argv[cargs_start + 1:] - self.app.config_from_cmdline(cargs, namespace=self.namespace) - return argv - - def parse_preload_options(self, args): - return self.preparse_options(args, self.preload_options) - - def add_append_opt(self, acc, opt, value): - acc.setdefault(opt.dest, opt.default or []) - acc[opt.dest].append(value) - - def preparse_options(self, args, options): - acc = {} - opts = {} - for opt in options: - for t in (opt._long_opts, opt._short_opts): - opts.update(dict(zip(t, [opt] * len(t)))) - index = 0 - length = len(args) - while index < length: - arg = args[index] - if arg.startswith('--'): - if '=' in arg: - key, value = arg.split('=', 1) - opt = opts.get(key) - if opt: - if opt.action == 'append': - self.add_append_opt(acc, opt, value) - else: - acc[opt.dest] = value - else: - opt = opts.get(arg) - if opt and opt.takes_value(): - # optparse also supports ['--opt', 'value'] - # (Issue #1668) - if opt.action == 'append': - self.add_append_opt(acc, opt, args[index + 1]) - else: - acc[opt.dest] = args[index + 1] - index += 1 - elif opt and opt.action == 'store_true': - acc[opt.dest] = True - elif arg.startswith('-'): - opt = opts.get(arg) - if opt: - if opt.takes_value(): - try: - acc[opt.dest] = args[index + 1] - except IndexError: - raise ValueError( - 'Missing required argument for {0}'.format( - arg)) - index += 1 - elif opt.action == 'store_true': - acc[opt.dest] = True - index += 1 - return acc - - def parse_doc(self, doc): - options, in_option = defaultdict(list), None - for line in doc.splitlines(): - if line.startswith('.. cmdoption::'): - m = find_long_opt.match(line) - if m: - in_option = m.groups()[0].strip() - assert in_option, 'missing long opt' - elif in_option and line.startswith(' ' * 4): - options[in_option].append( - find_rst_ref.sub(r'\1', line.strip()).replace('`', '')) - return options - - def with_pool_option(self, argv): - """Return tuple of ``(short_opts, long_opts)`` if the command - supports a pool argument, and used to monkey patch eventlet/gevent - environments as early as possible. 
- - E.g:: - has_pool_option = (['-P'], ['--pool']) - """ - pass - - def node_format(self, s, nodename, **extra): - return node_format(s, nodename, **extra) - - def host_format(self, s, **extra): - return host_format(s, **extra) - - def _get_default_app(self, *args, **kwargs): - from celery._state import get_current_app - return get_current_app() # omit proxy - - def pretty_list(self, n): - c = self.colored - if not n: - return '- empty -' - return '\n'.join( - str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n - ) - - def pretty_dict_ok_error(self, n): - c = self.colored - try: - return (c.green('OK'), - text.indent(self.pretty(n['ok'])[1], 4)) - except KeyError: - pass - return (c.red('ERROR'), - text.indent(self.pretty(n['error'])[1], 4)) - - def say_remote_command_reply(self, replies): - c = self.colored - node = next(iter(replies)) # <-- take first. - reply = replies[node] - status, preply = self.pretty(reply) - self.say_chat('->', c.cyan(node, ': ') + status, - text.indent(preply, 4) if self.show_reply else '') - - def pretty(self, n): - OK = str(self.colored.green('OK')) - if isinstance(n, list): - return OK, self.pretty_list(n) - if isinstance(n, dict): - if 'ok' in n or 'error' in n: - return self.pretty_dict_ok_error(n) - else: - return OK, json.dumps(n, sort_keys=True, indent=4) - if isinstance(n, string_t): - return OK, string(n) - return OK, pformat(n) - - def say_chat(self, direction, title, body=''): - c = self.colored - if direction == '<-' and self.quiet: - return - dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' - self.out(c.reset(dirstr, title)) - if body and self.show_body: - self.out(body) - - @property - def colored(self): - if self._colored is None: - self._colored = term.colored(enabled=not self.no_color) - return self._colored - - @colored.setter - def colored(self, obj): - self._colored = obj - - @property - def no_color(self): - return self._no_color - - @no_color.setter - def no_color(self, value): - self._no_color = value - if self._colored is not None: - self._colored.enabled = not self._no_color - - -def daemon_options(default_pidfile=None, default_logfile=None): - return ( - Option('-f', '--logfile', default=default_logfile), - Option('--pidfile', default=default_pidfile), - Option('--uid', default=None), - Option('--gid', default=None), - Option('--umask', default=None), - Option('--executable', default=None), - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py b/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py deleted file mode 100644 index 4bcbc62..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery beat` command. - -.. program:: celery beat - -.. seealso:: - - See :ref:`preload-options` and :ref:`daemon-options`. - -.. cmdoption:: --detach - - Detach and run in the background as a daemon. - -.. cmdoption:: -s, --schedule - - Path to the schedule database. Defaults to `celerybeat-schedule`. - The extension '.db' may be appended to the filename. - Default is {default}. - -.. cmdoption:: -S, --scheduler - - Scheduler class to use. - Default is :class:`celery.beat.PersistentScheduler`. - -.. cmdoption:: --max-interval - - Max seconds to sleep between schedule iterations. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. 
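
[editor's note] The daemon options documented above come from the shared
daemon_options() helper in celery/bin/base.py (shown earlier); commands
splice them into their option list, e.g.::

    daemon_options(default_pidfile='celerybeat.pid')
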
- -""" -from __future__ import absolute_import - -from functools import partial - -from celery.platforms import detached, maybe_drop_privileges - -from celery.bin.base import Command, Option, daemon_options - -__all__ = ['beat'] - - -class beat(Command): - """Start the beat periodic task scheduler. - - Examples:: - - celery beat -l info - celery beat -s /var/run/celery/beat-schedule --detach - celery beat -S djcelery.schedulers.DatabaseScheduler - - """ - doc = __doc__ - enable_config_from_cmdline = True - supports_args = False - - def run(self, detach=False, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, **kwargs): - if not detach: - maybe_drop_privileges(uid=uid, gid=gid) - workdir = working_directory - kwargs.pop('app', None) - beat = partial(self.app.Beat, - logfile=logfile, pidfile=pidfile, **kwargs) - - if detach: - with detached(logfile, pidfile, uid, gid, umask, workdir): - return beat().run() - else: - return beat().run() - - def get_options(self): - c = self.app.conf - - return ( - (Option('--detach', action='store_true'), - Option('-s', '--schedule', - default=c.CELERYBEAT_SCHEDULE_FILENAME), - Option('--max-interval', type='float'), - Option('-S', '--scheduler', dest='scheduler_cls'), - Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL)) + - daemon_options(default_pidfile='celerybeat.pid') + - tuple(self.app.user_options['beat']) - ) - - -def main(app=None): - beat(app=app).execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py deleted file mode 100644 index 4676b30..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py +++ /dev/null @@ -1,850 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery` umbrella command. - -.. program:: celery - -""" -from __future__ import absolute_import, unicode_literals - -import anyjson -import numbers -import os -import sys - -from functools import partial -from importlib import import_module - -from celery.five import string_t, values -from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE -from celery.utils import term -from celery.utils import text -from celery.utils.timeutils import maybe_iso8601 - -# Cannot use relative imports here due to a Windows issue (#1111). -from celery.bin.base import Command, Option, Extensions - -# Import commands from other modules -from celery.bin.amqp import amqp -from celery.bin.beat import beat -from celery.bin.events import events -from celery.bin.graph import graph -from celery.bin.worker import worker - -__all__ = ['CeleryCommand', 'main'] - -HELP = """ ----- -- - - ---- Commands- -------------- --- ------------ - -{commands} ----- -- - - --------- -- - -------------- --- ------------ - -Type '{prog_name} --help' for help using a specific command. 
-""" - -MIGRATE_PROGRESS_FMT = """\ -Migrating task {state.count}/{state.strtotal}: \ -{body[task]}[{body[id]}]\ -""" - -DEBUG = os.environ.get('C_DEBUG', False) - -command_classes = [ - ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'), - ('Remote Control', ['status', 'inspect', 'control'], 'blue'), - ('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None), -] -if DEBUG: # pragma: no cover - command_classes.append( - ('Debug', ['graph'], 'red'), - ) - - -def determine_exit_status(ret): - if isinstance(ret, numbers.Integral): - return ret - return EX_OK if ret else EX_FAILURE - - -def main(argv=None): - # Fix for setuptools generated scripts, so that it will - # work with multiprocessing fork emulation. - # (see multiprocessing.forking.get_preparation_data()) - try: - if __name__ != '__main__': # pragma: no cover - sys.modules['__main__'] = sys.modules[__name__] - cmd = CeleryCommand() - cmd.maybe_patch_concurrency() - from billiard import freeze_support - freeze_support() - cmd.execute_from_commandline(argv) - except KeyboardInterrupt: - pass - - -class multi(Command): - """Start multiple worker instances.""" - respects_app_option = False - - def get_options(self): - return () - - def run_from_argv(self, prog_name, argv, command=None): - from celery.bin.multi import MultiTool - multi = MultiTool(quiet=self.quiet, no_color=self.no_color) - return multi.execute_from_commandline( - [command] + argv, prog_name, - ) - - -class list_(Command): - """Get info from broker. - - Examples:: - - celery list bindings - - NOTE: For RabbitMQ the management plugin is required. - """ - args = '[bindings]' - - def list_bindings(self, management): - try: - bindings = management.get_bindings() - except NotImplementedError: - raise self.Error('Your transport cannot list bindings.') - - def fmt(q, e, r): - return self.out('{0:<28} {1:<28} {2}'.format(q, e, r)) - fmt('Queue', 'Exchange', 'Routing Key') - fmt('-' * 16, '-' * 16, '-' * 16) - for b in bindings: - fmt(b['destination'], b['source'], b['routing_key']) - - def run(self, what=None, *_, **kw): - topics = {'bindings': self.list_bindings} - available = ', '.join(topics) - if not what: - raise self.UsageError( - 'You must specify one of {0}'.format(available)) - if what not in topics: - raise self.UsageError( - 'unknown topic {0!r} (choose one of: {1})'.format( - what, available)) - with self.app.connection() as conn: - self.app.amqp.TaskConsumer(conn).declare() - topics[what](conn.manager) - - -class call(Command): - """Call a task by name. - - Examples:: - - celery call tasks.add --args='[2, 2]' - celery call tasks.add --args='[2, 2]' --countdown=10 - """ - args = '' - option_list = Command.option_list + ( - Option('--args', '-a', help='positional arguments (json).'), - Option('--kwargs', '-k', help='keyword arguments (json).'), - Option('--eta', help='scheduled time (ISO-8601).'), - Option('--countdown', type='float', - help='eta in seconds from now (float/int).'), - Option('--expires', help='expiry time (ISO-8601/float/int).'), - Option('--serializer', default='json', help='defaults to json.'), - Option('--queue', help='custom queue name.'), - Option('--exchange', help='custom exchange name.'), - Option('--routing-key', help='custom routing key.'), - ) - - def run(self, name, *_, **kw): - # Positional args. - args = kw.get('args') or () - if isinstance(args, string_t): - args = anyjson.loads(args) - - # Keyword args. 
- kwargs = kw.get('kwargs') or {} - if isinstance(kwargs, string_t): - kwargs = anyjson.loads(kwargs) - - # Expires can be int/float. - expires = kw.get('expires') or None - try: - expires = float(expires) - except (TypeError, ValueError): - # or a string describing an ISO 8601 datetime. - try: - expires = maybe_iso8601(expires) - except (TypeError, ValueError): - raise - - res = self.app.send_task(name, args=args, kwargs=kwargs, - countdown=kw.get('countdown'), - serializer=kw.get('serializer'), - queue=kw.get('queue'), - exchange=kw.get('exchange'), - routing_key=kw.get('routing_key'), - eta=maybe_iso8601(kw.get('eta')), - expires=expires) - self.out(res.id) - - -class purge(Command): - """Erase all messages from all known task queues. - - WARNING: There is no undo operation for this command. - - """ - warn_prelude = ( - '{warning}: This will remove all tasks from {queues}: {names}.\n' - ' There is no undo for this operation!\n\n' - '(to skip this prompt use the -f option)\n' - ) - warn_prompt = 'Are you sure you want to delete all tasks' - fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' - fmt_empty = 'No messages purged from {qnum} {queues}' - option_list = Command.option_list + ( - Option('--force', '-f', action='store_true', - help='Do not prompt for verification'), - ) - - def run(self, force=False, **kwargs): - names = list(sorted(self.app.amqp.queues.keys())) - qnum = len(names) - if not force: - self.out(self.warn_prelude.format( - warning=self.colored.red('WARNING'), - queues=text.pluralize(qnum, 'queue'), names=', '.join(names), - )) - if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes': - return - messages = self.app.control.purge() - fmt = self.fmt_purged if messages else self.fmt_empty - self.out(fmt.format( - mnum=messages, qnum=qnum, - messages=text.pluralize(messages, 'message'), - queues=text.pluralize(qnum, 'queue'))) - - -class result(Command): - """Gives the return value for a given task id. - - Examples:: - - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback - - """ - args = '' - option_list = Command.option_list + ( - Option('--task', '-t', help='name of task (if custom backend)'), - Option('--traceback', action='store_true', - help='show traceback instead'), - ) - - def run(self, task_id, *args, **kwargs): - result_cls = self.app.AsyncResult - task = kwargs.get('task') - traceback = kwargs.get('traceback', False) - - if task: - result_cls = self.app.tasks[task].AsyncResult - result = result_cls(task_id) - if traceback: - value = result.traceback - else: - value = result.get() - self.out(self.pretty(value)[1]) - - -class _RemoteControl(Command): - name = None - choices = None - leaf = False - option_list = Command.option_list + ( - Option('--timeout', '-t', type='float', - help='Timeout in seconds (float) waiting for reply'), - Option('--destination', '-d', - help='Comma separated list of destination node names.')) - - def __init__(self, *args, **kwargs): - self.show_body = kwargs.pop('show_body', True) - self.show_reply = kwargs.pop('show_reply', True) - super(_RemoteControl, self).__init__(*args, **kwargs) - - @classmethod - def get_command_info(self, command, - indent=0, prefix='', color=None, help=False): - if help: - help = '|' + text.indent(self.choices[command][1], indent + 4) - else: - help = None - try: - # see if it uses args. 
- meth = getattr(self, command) - return text.join([ - '|' + text.indent('{0}{1} {2}'.format( - prefix, color(command), meth.__doc__), indent), - help, - ]) - - except AttributeError: - return text.join([ - '|' + text.indent(prefix + str(color(command)), indent), help, - ]) - - @classmethod - def list_commands(self, indent=0, prefix='', color=None, help=False): - color = color if color else lambda x: x - prefix = prefix + ' ' if prefix else '' - return '\n'.join(self.get_command_info(c, indent, prefix, color, help) - for c in sorted(self.choices)) - - @property - def epilog(self): - return '\n'.join([ - '[Commands]', - self.list_commands(indent=4, help=True) - ]) - - def usage(self, command): - return '%prog {0} [options] {1} [arg1 .. argN]'.format( - command, self.args) - - def call(self, *args, **kwargs): - raise NotImplementedError('call') - - def run(self, *args, **kwargs): - if not args: - raise self.UsageError( - 'Missing {0.name} method. See --help'.format(self)) - return self.do_call_method(args, **kwargs) - - def do_call_method(self, args, **kwargs): - method = args[0] - if method == 'help': - raise self.Error("Did you mean '{0.name} --help'?".format(self)) - if method not in self.choices: - raise self.UsageError( - 'Unknown {0.name} method {1}'.format(self, method)) - - if self.app.connection().transport.driver_type == 'sql': - raise self.Error('Broadcast not supported by SQL broker transport') - - destination = kwargs.get('destination') - timeout = kwargs.get('timeout') or self.choices[method][0] - if destination and isinstance(destination, string_t): - destination = [dest.strip() for dest in destination.split(',')] - - handler = getattr(self, method, self.call) - - replies = handler(method, *args[1:], timeout=timeout, - destination=destination, - callback=self.say_remote_command_reply) - if not replies: - raise self.Error('No nodes replied within time constraint.', - status=EX_UNAVAILABLE) - return replies - - -class inspect(_RemoteControl): - """Inspect the worker at runtime. - - Availability: RabbitMQ (amqp), Redis, and MongoDB transports. - - Examples:: - - celery inspect active --timeout=5 - celery inspect scheduled -d worker1@example.com - celery inspect revoked -d w1@e.com,w2@e.com - - """ - name = 'inspect' - choices = { - 'active': (1.0, 'dump active tasks (being processed)'), - 'active_queues': (1.0, 'dump queues being consumed from'), - 'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'), - 'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'), - 'stats': (1.0, 'dump worker statistics'), - 'revoked': (1.0, 'dump of revoked task ids'), - 'registered': (1.0, 'dump of registered tasks'), - 'ping': (0.2, 'ping worker(s)'), - 'clock': (1.0, 'get value of logical clock'), - 'conf': (1.0, 'dump worker configuration'), - 'report': (1.0, 'get bugreport info'), - 'memsample': (1.0, 'sample memory (requires psutil)'), - 'memdump': (1.0, 'dump memory samples (requires psutil)'), - 'objgraph': (60.0, 'create object graph (requires objgraph)'), - } - - def call(self, method, *args, **options): - i = self.app.control.inspect(**options) - return getattr(i, method)(*args) - - def objgraph(self, type_='Request', *args, **kwargs): - return self.call('objgraph', type_, **kwargs) - - def conf(self, with_defaults=False, *args, **kwargs): - return self.call('conf', with_defaults, **kwargs) - - -class control(_RemoteControl): - """Workers remote control. - - Availability: RabbitMQ (amqp), Redis, and MongoDB transports. 
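
[Note] Both `inspect` and `control` funnel through `do_call_method` above into a broadcast over `app.control`. Roughly equivalent calls from Python (the broker URL is a placeholder):

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')   # placeholder broker URL
    i = app.control.inspect(timeout=5.0)
    print(i.ping())              # one {'node': reply} mapping per responding worker
    print(i.active())            # tasks currently being processed
    app.control.enable_events()  # what `celery control enable_events` sends
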
-
-    Examples::
-
-        celery control enable_events --timeout=5
-        celery control -d worker1@example.com enable_events
-        celery control -d w1.e.com,w2.e.com enable_events
-
-        celery control -d w1.e.com add_consumer queue_name
-        celery control -d w1.e.com cancel_consumer queue_name
-
-        celery control -d w1.e.com add_consumer queue exchange direct rkey
-
-    """
-    name = 'control'
-    choices = {
-        'enable_events': (1.0, 'tell worker(s) to enable events'),
-        'disable_events': (1.0, 'tell worker(s) to disable events'),
-        'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'),
-        'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'),
-        'rate_limit': (
-            1.0, 'tell worker(s) to modify the rate limit for a task type'),
-        'time_limit': (
-            1.0, 'tell worker(s) to modify the time limit for a task type.'),
-        'autoscale': (1.0, 'change autoscale settings'),
-        'pool_grow': (1.0, 'start more pool processes'),
-        'pool_shrink': (1.0, 'use less pool processes'),
-    }
-
-    def call(self, method, *args, **options):
-        return getattr(self.app.control, method)(*args, reply=True, **options)
-
-    def pool_grow(self, method, n=1, **kwargs):
-        """[N=1]"""
-        return self.call(method, int(n), **kwargs)
-
-    def pool_shrink(self, method, n=1, **kwargs):
-        """[N=1]"""
-        return self.call(method, int(n), **kwargs)
-
-    def autoscale(self, method, max=None, min=None, **kwargs):
-        """[max] [min]"""
-        return self.call(method, int(max), int(min), **kwargs)
-
-    def rate_limit(self, method, task_name, rate_limit, **kwargs):
-        """<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)"""
-        return self.call(method, task_name, rate_limit, **kwargs)
-
-    def time_limit(self, method, task_name, soft, hard=None, **kwargs):
-        """<task_name> <soft_secs> [hard_secs]"""
-        return self.call(method, task_name,
-                         float(soft), float(hard), **kwargs)
-
-    def add_consumer(self, method, queue, exchange=None,
-                     exchange_type='direct', routing_key=None, **kwargs):
-        """<queue> [exchange [type [routing_key]]]"""
-        return self.call(method, queue, exchange,
-                         exchange_type, routing_key, **kwargs)
-
-    def cancel_consumer(self, method, queue, **kwargs):
-        """<queue>"""
-        return self.call(method, queue, **kwargs)
-
-
-class status(Command):
-    """Show list of workers that are online."""
-    option_list = inspect.option_list
-
-    def run(self, *args, **kwargs):
-        I = inspect(
-            app=self.app,
-            no_color=kwargs.get('no_color', False),
-            stdout=self.stdout, stderr=self.stderr,
-            show_reply=False, show_body=False, quiet=True,
-        )
-        replies = I.run('ping', **kwargs)
-        if not replies:
-            raise self.Error('No nodes replied within time constraint',
-                             status=EX_UNAVAILABLE)
-        nodecount = len(replies)
-        if not kwargs.get('quiet', False):
-            self.out('\n{0} {1} online.'.format(
-                nodecount, text.pluralize(nodecount, 'node')))
-
-
-class migrate(Command):
-    """Migrate tasks from one broker to another.
-
-    Examples::
-
-        celery migrate redis://localhost amqp://guest@localhost//
-        celery migrate django:// redis://localhost
-
-    NOTE: This command is experimental, make sure you have
-    a backup of the tasks before you continue.
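
[Note] The migrate command is a thin wrapper over `celery.contrib.migrate.migrate_tasks`, as its `run()` method just below shows. The programmatic form, with placeholder broker URLs:

    from kombu import Connection
    from celery.contrib.migrate import migrate_tasks

    # Moves queued (not yet executed) tasks between brokers; experimental,
    # so back up first, exactly as the docstring above warns.
    migrate_tasks(Connection('redis://localhost'),
                  Connection('amqp://guest@localhost//'),
                  timeout=1.0)
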
- """ - args = ' ' - option_list = Command.option_list + ( - Option('--limit', '-n', type='int', - help='Number of tasks to consume (int)'), - Option('--timeout', '-t', type='float', default=1.0, - help='Timeout in seconds (float) waiting for tasks'), - Option('--ack-messages', '-a', action='store_true', - help='Ack messages from source broker.'), - Option('--tasks', '-T', - help='List of task names to filter on.'), - Option('--queues', '-Q', - help='List of queues to migrate.'), - Option('--forever', '-F', action='store_true', - help='Continually migrate tasks until killed.'), - ) - progress_fmt = MIGRATE_PROGRESS_FMT - - def on_migrate_task(self, state, body, message): - self.out(self.progress_fmt.format(state=state, body=body)) - - def run(self, source, destination, **kwargs): - from kombu import Connection - from celery.contrib.migrate import migrate_tasks - - migrate_tasks(Connection(source), - Connection(destination), - callback=self.on_migrate_task, - **kwargs) - - -class shell(Command): # pragma: no cover - """Start shell session with convenient access to celery symbols. - - The following symbols will be added to the main globals: - - - celery: the current application. - - chord, group, chain, chunks, - xmap, xstarmap subtask, Task - - all registered tasks. - - """ - option_list = Command.option_list + ( - Option('--ipython', '-I', - action='store_true', dest='force_ipython', - help='force iPython.'), - Option('--bpython', '-B', - action='store_true', dest='force_bpython', - help='force bpython.'), - Option('--python', '-P', - action='store_true', dest='force_python', - help='force default Python shell.'), - Option('--without-tasks', '-T', action='store_true', - help="don't add tasks to locals."), - Option('--eventlet', action='store_true', - help='use eventlet.'), - Option('--gevent', action='store_true', help='use gevent.'), - ) - - def run(self, force_ipython=False, force_bpython=False, - force_python=False, without_tasks=False, eventlet=False, - gevent=False, **kwargs): - sys.path.insert(0, os.getcwd()) - if eventlet: - import_module('celery.concurrency.eventlet') - if gevent: - import_module('celery.concurrency.gevent') - import celery - import celery.task.base - self.app.loader.import_default_modules() - self.locals = {'app': self.app, - 'celery': self.app, - 'Task': celery.Task, - 'chord': celery.chord, - 'group': celery.group, - 'chain': celery.chain, - 'chunks': celery.chunks, - 'xmap': celery.xmap, - 'xstarmap': celery.xstarmap, - 'subtask': celery.subtask, - 'signature': celery.signature} - - if not without_tasks: - self.locals.update(dict( - (task.__name__, task) for task in values(self.app.tasks) - if not task.name.startswith('celery.')), - ) - - if force_python: - return self.invoke_fallback_shell() - elif force_bpython: - return self.invoke_bpython_shell() - elif force_ipython: - return self.invoke_ipython_shell() - return self.invoke_default_shell() - - def invoke_default_shell(self): - try: - import IPython # noqa - except ImportError: - try: - import bpython # noqa - except ImportError: - return self.invoke_fallback_shell() - else: - return self.invoke_bpython_shell() - else: - return self.invoke_ipython_shell() - - def invoke_fallback_shell(self): - import code - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.set_completer( - rlcompleter.Completer(self.locals).complete) - readline.parse_and_bind('tab:complete') - code.interact(local=self.locals) - - def invoke_ipython_shell(self): - for ip in (self._ipython, 
self._ipython_pre_10, - self._ipython_terminal, self._ipython_010, - self._no_ipython): - try: - return ip() - except ImportError: - pass - - def _ipython(self): - from IPython import start_ipython - start_ipython(argv=[], user_ns=self.locals) - - def _ipython_pre_10(self): # pragma: no cover - from IPython.frontend.terminal.ipapp import TerminalIPythonApp - app = TerminalIPythonApp.instance() - app.initialize(argv=[]) - app.shell.user_ns.update(self.locals) - app.start() - - def _ipython_terminal(self): # pragma: no cover - from IPython.terminal import embed - embed.TerminalInteractiveShell(user_ns=self.locals).mainloop() - - def _ipython_010(self): # pragma: no cover - from IPython.Shell import IPShell - IPShell(argv=[], user_ns=self.locals).mainloop() - - def _no_ipython(self): # pragma: no cover - raise ImportError("no suitable ipython found") - - def invoke_bpython_shell(self): - import bpython - bpython.embed(self.locals) - - -class help(Command): - """Show help screen and exit.""" - - def usage(self, command): - return '%prog [options] {0.args}'.format(self) - - def run(self, *args, **kwargs): - self.parser.print_help() - self.out(HELP.format( - prog_name=self.prog_name, - commands=CeleryCommand.list_commands(colored=self.colored), - )) - - return EX_USAGE - - -class report(Command): - """Shows information useful to include in bugreports.""" - - def run(self, *args, **kwargs): - self.out(self.app.bugreport()) - return EX_OK - - -class CeleryCommand(Command): - namespace = 'celery' - ext_fmt = '{self.namespace}.commands' - commands = { - 'amqp': amqp, - 'beat': beat, - 'call': call, - 'control': control, - 'events': events, - 'graph': graph, - 'help': help, - 'inspect': inspect, - 'list': list_, - 'migrate': migrate, - 'multi': multi, - 'purge': purge, - 'report': report, - 'result': result, - 'shell': shell, - 'status': status, - 'worker': worker, - - } - enable_config_from_cmdline = True - prog_name = 'celery' - - @classmethod - def register_command(cls, fun, name=None): - cls.commands[name or fun.__name__] = fun - return fun - - def execute(self, command, argv=None): - try: - cls = self.commands[command] - except KeyError: - cls, argv = self.commands['help'], ['help'] - cls = self.commands.get(command) or self.commands['help'] - try: - return cls( - app=self.app, on_error=self.on_error, - no_color=self.no_color, quiet=self.quiet, - on_usage_error=partial(self.on_usage_error, command=command), - ).run_from_argv(self.prog_name, argv[1:], command=argv[0]) - except self.UsageError as exc: - self.on_usage_error(exc) - return exc.status - except self.Error as exc: - self.on_error(exc) - return exc.status - - def on_usage_error(self, exc, command=None): - if command: - helps = '{self.prog_name} {command} --help' - else: - helps = '{self.prog_name} --help' - self.error(self.colored.magenta('Error: {0}'.format(exc))) - self.error("""Please try '{0}'""".format(helps.format( - self=self, command=command, - ))) - - def _relocate_args_from_start(self, argv, index=0): - if argv: - rest = [] - while index < len(argv): - value = argv[index] - if value.startswith('--'): - rest.append(value) - elif value.startswith('-'): - # we eat the next argument even though we don't know - # if this option takes an argument or not. - # instead we will assume what is the command name in the - # return statements below. 
- try: - nxt = argv[index + 1] - if nxt.startswith('-'): - # is another option - rest.append(value) - else: - # is (maybe) a value for this option - rest.extend([value, nxt]) - index += 1 - except IndexError: - rest.append(value) - break - else: - break - index += 1 - if argv[index:]: - # if there are more arguments left then divide and swap - # we assume the first argument in argv[i:] is the command - # name. - return argv[index:] + rest - # if there are no more arguments then the last arg in rest' - # must be the command. - [rest.pop()] + rest - return [] - - def prepare_prog_name(self, name): - if name == '__main__.py': - return sys.modules['__main__'].__file__ - return name - - def handle_argv(self, prog_name, argv): - self.prog_name = self.prepare_prog_name(prog_name) - argv = self._relocate_args_from_start(argv) - _, argv = self.prepare_args(None, argv) - try: - command = argv[0] - except IndexError: - command, argv = 'help', ['help'] - return self.execute(command, argv) - - def execute_from_commandline(self, argv=None): - argv = sys.argv if argv is None else argv - if 'multi' in argv[1:3]: # Issue 1008 - self.respects_app_option = False - try: - sys.exit(determine_exit_status( - super(CeleryCommand, self).execute_from_commandline(argv))) - except KeyboardInterrupt: - sys.exit(EX_FAILURE) - - @classmethod - def get_command_info(self, command, indent=0, color=None, colored=None): - colored = term.colored() if colored is None else colored - colored = colored.names[color] if color else lambda x: x - obj = self.commands[command] - cmd = 'celery {0}'.format(colored(command)) - if obj.leaf: - return '|' + text.indent(cmd, indent) - return text.join([ - ' ', - '|' + text.indent('{0} --help'.format(cmd), indent), - obj.list_commands(indent, 'celery {0}'.format(command), colored), - ]) - - @classmethod - def list_commands(self, indent=0, colored=None): - colored = term.colored() if colored is None else colored - white = colored.white - ret = [] - for cls, commands, color in command_classes: - ret.extend([ - text.indent('+ {0}: '.format(white(cls)), indent), - '\n'.join( - self.get_command_info(command, indent + 4, color, colored) - for command in commands), - '' - ]) - return '\n'.join(ret).strip() - - def with_pool_option(self, argv): - if len(argv) > 1 and 'worker' in argv[0:3]: - # this command supports custom pools - # that may have to be loaded as early as possible. 
- return (['-P'], ['--pool']) - - def on_concurrency_setup(self): - self.load_extension_commands() - - def load_extension_commands(self): - names = Extensions(self.ext_fmt.format(self=self), - self.register_command).load() - if names: - command_classes.append(('Extensions', names, 'magenta')) - - -def command(*args, **kwargs): - """Deprecated: Use classmethod :meth:`CeleryCommand.register_command` - instead.""" - _register = CeleryCommand.register_command - return _register(args[0]) if args else _register - - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py deleted file mode 100644 index 4d37d5f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.bin.celeryd_detach - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Program used to daemonize the worker - - Using :func:`os.execv` because forking and multiprocessing - leads to weird issues (it was a long time ago now, but it - could have something to do with the threading mutex bug) - -""" -from __future__ import absolute_import - -import celery -import os -import sys - -from optparse import OptionParser, BadOptionError - -from celery.platforms import EX_FAILURE, detached -from celery.utils import default_nodename, node_format -from celery.utils.log import get_logger - -from celery.bin.base import daemon_options, Option - -__all__ = ['detached_celeryd', 'detach'] - -logger = get_logger(__name__) - -C_FAKEFORK = os.environ.get('C_FAKEFORK') - -OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( - Option('--workdir', default=None, dest='working_directory'), - Option('-n', '--hostname'), - Option('--fake', - default=False, action='store_true', dest='fake', - help="Don't fork (for debugging purposes)"), -) - - -def detach(path, argv, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, fake=False, app=None, - executable=None, hostname=None): - hostname = default_nodename(hostname) - logfile = node_format(logfile, hostname) - pidfile = node_format(pidfile, hostname) - fake = 1 if C_FAKEFORK else fake - with detached(logfile, pidfile, uid, gid, umask, working_directory, fake, - after_forkers=False): - try: - if executable is not None: - path = executable - os.execv(path, [path] + argv) - except Exception: - if app is None: - from celery import current_app - app = current_app - app.log.setup_logging_subsystem( - 'ERROR', logfile, hostname=hostname) - logger.critical("Can't exec %r", ' '.join([path] + argv), - exc_info=True) - return EX_FAILURE - - -class PartialOptionParser(OptionParser): - - def __init__(self, *args, **kwargs): - self.leftovers = [] - OptionParser.__init__(self, *args, **kwargs) - - def _process_long_opt(self, rargs, values): - arg = rargs.pop(0) - - if '=' in arg: - opt, next_arg = arg.split('=', 1) - rargs.insert(0, next_arg) - had_explicit_value = True - else: - opt = arg - had_explicit_value = False - - try: - opt = self._match_long_opt(opt) - option = self._long_opt.get(opt) - except BadOptionError: - option = None - - if option: - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error('{0} requires an argument'.format(opt)) - else: - self.error('{0} requires {1} arguments'.format( - opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - 
elif had_explicit_value: - self.error('{0} option does not take a value'.format(opt)) - else: - value = None - option.process(opt, value, values, self) - else: - self.leftovers.append(arg) - - def _process_short_opts(self, rargs, values): - arg = rargs[0] - try: - OptionParser._process_short_opts(self, rargs, values) - except BadOptionError: - self.leftovers.append(arg) - if rargs and not rargs[0][0] == '-': - self.leftovers.append(rargs.pop(0)) - - -class detached_celeryd(object): - option_list = OPTION_LIST - usage = '%prog [options] [celeryd options]' - version = celery.VERSION_BANNER - description = ('Detaches Celery worker nodes. See `celery worker --help` ' - 'for the list of supported worker arguments.') - command = sys.executable - execv_path = sys.executable - if sys.version_info < (2, 7): # does not support pkg/__main__.py - execv_argv = ['-m', 'celery.__main__', 'worker'] - else: - execv_argv = ['-m', 'celery', 'worker'] - - def __init__(self, app=None): - self.app = app - - def Parser(self, prog_name): - return PartialOptionParser(prog=prog_name, - option_list=self.option_list, - usage=self.usage, - description=self.description, - version=self.version) - - def parse_options(self, prog_name, argv): - parser = self.Parser(prog_name) - options, values = parser.parse_args(argv) - if options.logfile: - parser.leftovers.append('--logfile={0}'.format(options.logfile)) - if options.pidfile: - parser.leftovers.append('--pidfile={0}'.format(options.pidfile)) - if options.hostname: - parser.leftovers.append('--hostname={0}'.format(options.hostname)) - return options, values, parser.leftovers - - def execute_from_commandline(self, argv=None): - if argv is None: - argv = sys.argv - config = [] - seen_cargs = 0 - for arg in argv: - if seen_cargs: - config.append(arg) - else: - if arg == '--': - seen_cargs = 1 - config.append(arg) - prog_name = os.path.basename(argv[0]) - options, values, leftovers = self.parse_options(prog_name, argv[1:]) - sys.exit(detach( - app=self.app, path=self.execv_path, - argv=self.execv_argv + leftovers + config, - **vars(options) - )) - - -def main(app=None): - detached_celeryd(app).execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/events.py b/thesisenv/lib/python3.6/site-packages/celery/bin/events.py deleted file mode 100644 index 8cc61b6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/events.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery events` command. - -.. program:: celery events - -.. seealso:: - - See :ref:`preload-options` and :ref:`daemon-options`. - -.. cmdoption:: -d, --dump - - Dump events to stdout. - -.. cmdoption:: -c, --camera - - Take snapshots of events using this camera. - -.. cmdoption:: --detach - - Camera: Detach and run in the background as a daemon. - -.. cmdoption:: -F, --freq, --frequency - - Camera: Shutter frequency. Default is every 1.0 seconds. - -.. cmdoption:: -r, --maxrate - - Camera: Optional shutter rate limit (e.g. 10/m). - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. Default is INFO. 
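
[Note] celeryd_detach's `execute_from_commandline` above splits argv on the `--` separator so that everything after it travels to the new process as configuration overrides. The convention in isolation:

    def split_at_separator(argv):
        """Split ['--logfile=w.log', '--', 'broker.heartbeat=30'] style argv."""
        try:
            i = argv.index('--')
        except ValueError:
            return argv, []
        return argv[:i], argv[i:]      # the '--' stays with the config part

    own, config = split_at_separator(['--logfile=w.log', '--', 'broker.heartbeat=30'])
    print(own)      # ['--logfile=w.log']
    print(config)   # ['--', 'broker.heartbeat=30']
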
- -""" -from __future__ import absolute_import, unicode_literals - -import sys - -from functools import partial - -from celery.platforms import detached, set_process_title, strargv -from celery.bin.base import Command, Option, daemon_options - -__all__ = ['events'] - - -class events(Command): - """Event-stream utilities. - - Commands:: - - celery events --app=proj - start graphical monitor (requires curses) - celery events -d --app=proj - dump events to screen. - celery events -b amqp:// - celery events -c [options] - run snapshot camera. - - Examples:: - - celery events - celery events -d - celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info - """ - doc = __doc__ - supports_args = False - - def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, - loglevel='INFO', logfile=None, prog_name='celery events', - pidfile=None, uid=None, gid=None, umask=None, - working_directory=None, detach=False, **kwargs): - self.prog_name = prog_name - - if dump: - return self.run_evdump() - if camera: - return self.run_evcam(camera, freq=frequency, maxrate=maxrate, - loglevel=loglevel, logfile=logfile, - pidfile=pidfile, uid=uid, gid=gid, - umask=umask, - working_directory=working_directory, - detach=detach) - return self.run_evtop() - - def run_evdump(self): - from celery.events.dumper import evdump - self.set_process_status('dump') - return evdump(app=self.app) - - def run_evtop(self): - from celery.events.cursesmon import evtop - self.set_process_status('top') - return evtop(app=self.app) - - def run_evcam(self, camera, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, - detach=False, **kwargs): - from celery.events.snapshot import evcam - workdir = working_directory - self.set_process_status('cam') - kwargs['app'] = self.app - cam = partial(evcam, camera, - logfile=logfile, pidfile=pidfile, **kwargs) - - if detach: - with detached(logfile, pidfile, uid, gid, umask, workdir): - return cam() - else: - return cam() - - def set_process_status(self, prog, info=''): - prog = '{0}:{1}'.format(self.prog_name, prog) - info = '{0} {1}'.format(info, strargv(sys.argv)) - return set_process_title(prog, info=info) - - def get_options(self): - return ( - (Option('-d', '--dump', action='store_true'), - Option('-c', '--camera'), - Option('--detach', action='store_true'), - Option('-F', '--frequency', '--freq', - type='float', default=1.0), - Option('-r', '--maxrate'), - Option('-l', '--loglevel', default='INFO')) + - daemon_options(default_pidfile='celeryev.pid') + - tuple(self.app.user_options['events']) - ) - - -def main(): - ev = events() - ev.execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py b/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py deleted file mode 100644 index 5d58476..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery graph` command. - -.. program:: celery graph - -""" -from __future__ import absolute_import, unicode_literals - -from operator import itemgetter - -from celery.datastructures import DependencyGraph, GraphFormatter -from celery.five import items - -from .base import Command - -__all__ = ['graph'] - - -class graph(Command): - args = """ [arguments] - ..... bootsteps [worker] [consumer] - ..... 
workers [enumerate] - """ - - def run(self, what=None, *args, **kwargs): - map = {'bootsteps': self.bootsteps, 'workers': self.workers} - if not what: - raise self.UsageError('missing type') - elif what not in map: - raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map))) - return map[what](*args, **kwargs) - - def bootsteps(self, *args, **kwargs): - worker = self.app.WorkController() - include = set(arg.lower() for arg in args or ['worker', 'consumer']) - if 'worker' in include: - graph = worker.blueprint.graph - if 'consumer' in include: - worker.blueprint.connect_with(worker.consumer.blueprint) - else: - graph = worker.consumer.blueprint.graph - graph.to_dot(self.stdout) - - def workers(self, *args, **kwargs): - - def simplearg(arg): - return maybe_list(itemgetter(0, 2)(arg.partition(':'))) - - def maybe_list(l, sep=','): - return (l[0], l[1].split(sep) if sep in l[1] else l[1]) - - args = dict(simplearg(arg) for arg in args) - generic = 'generic' in args - - def generic_label(node): - return '{0} ({1}://)'.format(type(node).__name__, - node._label.split('://')[0]) - - class Node(object): - force_label = None - scheme = {} - - def __init__(self, label, pos=None): - self._label = label - self.pos = pos - - def label(self): - return self._label - - def __str__(self): - return self.label() - - class Thread(Node): - scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', - 'shape': 'oval', 'fontsize': 10, 'width': 0.3, - 'color': 'black'} - - def __init__(self, label, **kwargs): - self._label = 'thr-{0}'.format(next(tids)) - self.real_label = label - self.pos = 0 - - class Formatter(GraphFormatter): - - def label(self, obj): - return obj and obj.label() - - def node(self, obj): - scheme = dict(obj.scheme) if obj.pos else obj.scheme - if isinstance(obj, Thread): - scheme['label'] = obj.real_label - return self.draw_node( - obj, dict(self.node_scheme, **scheme), - ) - - def terminal_node(self, obj): - return self.draw_node( - obj, dict(self.term_scheme, **obj.scheme), - ) - - def edge(self, a, b, **attrs): - if isinstance(a, Thread): - attrs.update(arrowhead='none', arrowtail='tee') - return self.draw_edge(a, b, self.edge_scheme, attrs) - - def subscript(n): - S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄', - '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} - return ''.join([S[i] for i in str(n)]) - - class Worker(Node): - pass - - class Backend(Node): - scheme = {'shape': 'folder', 'width': 2, - 'height': 1, 'color': 'black', - 'fillcolor': 'peachpuff3', 'color': 'peachpuff4'} - - def label(self): - return generic_label(self) if generic else self._label - - class Broker(Node): - scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3', - 'color': 'cadetblue4', 'height': 1} - - def label(self): - return generic_label(self) if generic else self._label - - from itertools import count - tids = count(1) - Wmax = int(args.get('wmax', 4) or 0) - Tmax = int(args.get('tmax', 3) or 0) - - def maybe_abbr(l, name, max=Wmax): - size = len(l) - abbr = max and size > max - if 'enumerate' in args: - l = ['{0}{1}'.format(name, subscript(i + 1)) - for i, obj in enumerate(l)] - if abbr: - l = l[0:max - 1] + [l[size - 1]] - l[max - 2] = '{0}⎨…{1}⎬'.format( - name[0], subscript(size - (max - 1))) - return l - - try: - workers = args['nodes'] - threads = args.get('threads') or [] - except KeyError: - replies = self.app.control.inspect().stats() - workers, threads = [], [] - for worker, reply in items(replies): - workers.append(worker) - threads.append(reply['pool']['max-concurrency']) - - 
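
[Note] Both graph types are emitted through `celery.datastructures.DependencyGraph`, whose small API (`add_arc` for a vertex, `add_edge` for a dependency, `to_dot` to render) this command uses on either side of this point. The same API in isolation:

    import sys
    from celery.datastructures import DependencyGraph

    g = DependencyGraph()
    g.add_arc('broker')                # add_arc() introduces a vertex
    for name in ('worker1', 'worker2'):
        g.add_arc(name)
        g.add_edge(name, 'broker')     # edge: name depends on broker
    g.to_dot(sys.stdout)               # renders DOT, ready for graphviz
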
wlen = len(workers) - backend = args.get('backend', self.app.conf.CELERY_RESULT_BACKEND) - threads_for = {} - workers = maybe_abbr(workers, 'Worker') - if Wmax and wlen > Wmax: - threads = threads[0:3] + [threads[-1]] - for i, threads in enumerate(threads): - threads_for[workers[i]] = maybe_abbr( - list(range(int(threads))), 'P', Tmax, - ) - - broker = Broker(args.get('broker', self.app.connection().as_uri())) - backend = Backend(backend) if backend else None - graph = DependencyGraph(formatter=Formatter()) - graph.add_arc(broker) - if backend: - graph.add_arc(backend) - curworker = [0] - for i, worker in enumerate(workers): - worker = Worker(worker, pos=i) - graph.add_arc(worker) - graph.add_edge(worker, broker) - if backend: - graph.add_edge(worker, backend) - threads = threads_for.get(worker._label) - if threads: - for thread in threads: - thread = Thread(thread) - graph.add_arc(thread) - graph.add_edge(thread, worker) - - curworker[0] += 1 - - graph.to_dot(self.stdout) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py b/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py deleted file mode 100644 index f30aa9e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py +++ /dev/null @@ -1,646 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -.. program:: celery multi - -Examples -======== - -.. code-block:: bash - - # Single worker with explicit name and events enabled. - $ celery multi start Leslie -E - - # Pidfiles and logfiles are stored in the current directory - # by default. Use --pidfile and --logfile argument to change - # this. The abbreviation %N will be expanded to the current - # node name. - $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid - --logfile=/var/log/celery/%N.log - - - # You need to add the same arguments when you restart, - # as these are not persisted anywhere. - $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid - --logfile=/var/run/celery/%N.log - - # To stop the node, you need to specify the same pidfile. - $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid - - # 3 workers, with 3 processes each - $ celery multi start 3 -c 3 - celery worker -n celery1@myhost -c 3 - celery worker -n celery2@myhost -c 3 - celery worker -n celery3@myhost -c 3 - - # start 3 named workers - $ celery multi start image video data -c 3 - celery worker -n image@myhost -c 3 - celery worker -n video@myhost -c 3 - celery worker -n data@myhost -c 3 - - # specify custom hostname - $ celery multi start 2 --hostname=worker.example.com -c 3 - celery worker -n celery1@worker.example.com -c 3 - celery worker -n celery2@worker.example.com -c 3 - - # specify fully qualified nodenames - $ celery multi start foo@worker.example.com bar@worker.example.com -c 3 - - # Advanced example starting 10 workers in the background: - # * Three of the workers processes the images and video queue - # * Two of the workers processes the data queue with loglevel DEBUG - # * the rest processes the default' queue. - $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data - -Q default -L:4,5 DEBUG - - # You can show the commands necessary to start the workers with - # the 'show' command: - $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data - -Q default -L:4,5 DEBUG - - # Additional options are added to each celery worker' comamnd, - # but you can also modify the options for ranges of, or specific workers - - # 3 workers: Two with 3 processes, and one with 10 processes. 
- $ celery multi start 3 -c 3 -c:1 10 - celery worker -n celery1@myhost -c 10 - celery worker -n celery2@myhost -c 3 - celery worker -n celery3@myhost -c 3 - - # can also specify options for named workers - $ celery multi start image video data -c 3 -c:image 10 - celery worker -n image@myhost -c 10 - celery worker -n video@myhost -c 3 - celery worker -n data@myhost -c 3 - - # ranges and lists of workers in options is also allowed: - # (-c:1-3 can also be written as -c:1,2,3) - $ celery multi start 5 -c 3 -c:1-3 10 - celery worker -n celery1@myhost -c 10 - celery worker -n celery2@myhost -c 10 - celery worker -n celery3@myhost -c 10 - celery worker -n celery4@myhost -c 3 - celery worker -n celery5@myhost -c 3 - - # lists also works with named workers - $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 - celery worker -n foo@myhost -c 10 - celery worker -n bar@myhost -c 10 - celery worker -n baz@myhost -c 10 - celery worker -n xuzzy@myhost -c 3 - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import errno -import os -import shlex -import signal -import socket -import sys - -from collections import defaultdict, namedtuple -from subprocess import Popen -from time import sleep - -from kombu.utils import cached_property -from kombu.utils.compat import OrderedDict -from kombu.utils.encoding import from_utf8 - -from celery import VERSION_BANNER -from celery.five import items -from celery.platforms import Pidfile, IS_WINDOWS -from celery.utils import term, nodesplit -from celery.utils.text import pluralize - -__all__ = ['MultiTool'] - -SIGNAMES = set(sig for sig in dir(signal) - if sig.startswith('SIG') and '_' not in sig) -SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES) - -USAGE = """\ -usage: {prog_name} start [worker options] - {prog_name} stop [-SIG (default: -TERM)] - {prog_name} stopwait [-SIG (default: -TERM)] - {prog_name} restart [-SIG] [worker options] - {prog_name} kill - - {prog_name} show [worker options] - {prog_name} get hostname [-qv] [worker options] - {prog_name} names - {prog_name} expand template - {prog_name} help - -additional options (must appear after command name): - - * --nosplash: Don't display program info. - * --quiet: Don't show as much output. - * --verbose: Show more output. - * --no-color: Don't display colors. -""" - -multi_args_t = namedtuple( - 'multi_args_t', ('name', 'argv', 'expander', 'namespace'), -) - - -def main(): - sys.exit(MultiTool().execute_from_commandline(sys.argv)) - - -CELERY_EXE = 'celery' -if sys.version_info < (2, 7): - # pkg.__main__ first supported in Py2.7 - CELERY_EXE = 'celery.__main__' - - -def celery_exe(*args): - return ' '.join((CELERY_EXE, ) + args) - - -class MultiTool(object): - retcode = 0 # Final exit code. 
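
[Note] A single number after `start`/`names` expands to celery1..celeryN, as `multi_args` below implements. The expansion can be previewed without spawning anything; the hostname suffix depends on the machine:

    from celery.bin.multi import MultiTool

    # Equivalent of `celery multi names 3`; argv[0] is consumed as prog name.
    MultiTool().execute_from_commandline(['celery-multi', 'names', '3'])
    # celery1@<hostname>
    # celery2@<hostname>
    # celery3@<hostname>
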
- - def __init__(self, env=None, fh=None, quiet=False, verbose=False, - no_color=False, nosplash=False, stdout=None, stderr=None): - """fh is an old alias to stdout.""" - self.stdout = self.fh = stdout or fh or sys.stdout - self.stderr = stderr or sys.stderr - self.env = env - self.nosplash = nosplash - self.quiet = quiet - self.verbose = verbose - self.no_color = no_color - self.prog_name = 'celery multi' - self.commands = {'start': self.start, - 'show': self.show, - 'stop': self.stop, - 'stopwait': self.stopwait, - 'stop_verify': self.stopwait, # compat alias - 'restart': self.restart, - 'kill': self.kill, - 'names': self.names, - 'expand': self.expand, - 'get': self.get, - 'help': self.help} - - def execute_from_commandline(self, argv, cmd='celery worker'): - argv = list(argv) # don't modify callers argv. - - # Reserve the --nosplash|--quiet|-q/--verbose options. - if '--nosplash' in argv: - self.nosplash = argv.pop(argv.index('--nosplash')) - if '--quiet' in argv: - self.quiet = argv.pop(argv.index('--quiet')) - if '-q' in argv: - self.quiet = argv.pop(argv.index('-q')) - if '--verbose' in argv: - self.verbose = argv.pop(argv.index('--verbose')) - if '--no-color' in argv: - self.no_color = argv.pop(argv.index('--no-color')) - - self.prog_name = os.path.basename(argv.pop(0)) - if not argv or argv[0][0] == '-': - return self.error() - - try: - self.commands[argv[0]](argv[1:], cmd) - except KeyError: - self.error('Invalid command: {0}'.format(argv[0])) - - return self.retcode - - def say(self, m, newline=True, file=None): - print(m, file=file or self.stdout, end='\n' if newline else '') - - def carp(self, m, newline=True, file=None): - return self.say(m, newline, file or self.stderr) - - def names(self, argv, cmd): - p = NamespacedOptionParser(argv) - self.say('\n'.join( - n.name for n in multi_args(p, cmd)), - ) - - def get(self, argv, cmd): - wanted = argv[0] - p = NamespacedOptionParser(argv[1:]) - for node in multi_args(p, cmd): - if node.name == wanted: - self.say(' '.join(node.argv)) - return - - def show(self, argv, cmd): - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - self.say('\n'.join( - ' '.join([sys.executable] + n.argv) for n in multi_args(p, cmd)), - ) - - def start(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - retcodes = [] - self.note('> Starting nodes...') - for node in multi_args(p, cmd): - self.note('\t> {0}: '.format(node.name), newline=False) - retcode = self.waitexec(node.argv, path=p.options['--executable']) - self.note(retcode and self.FAILED or self.OK) - retcodes.append(retcode) - self.retcode = int(any(retcodes)) - - def with_detacher_default_options(self, p): - _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') - _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log') - p.options.setdefault( - '--cmd', - '-m {0}'.format(celery_exe('worker', '--detach')), - ) - _setdefaultopt(p.options, ['--executable'], sys.executable) - - def signal_node(self, nodename, pid, sig): - try: - os.kill(pid, sig) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - self.note('Could not signal {0} ({1}): No such process'.format( - nodename, pid)) - return False - return True - - def node_alive(self, pid): - try: - os.kill(pid, 0) - except OSError as exc: - if exc.errno == errno.ESRCH: - return False - raise - return True - - def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None, - callback=None): - if not nodes: - return - P = set(nodes) - - def on_down(node): - 
P.discard(node) - if callback: - callback(*node) - - self.note(self.colored.blue('> Stopping nodes...')) - for node in list(P): - if node in P: - nodename, _, pid = node - self.note('\t> {0}: {1} -> {2}'.format( - nodename, SIGMAP[sig][3:], pid)) - if not self.signal_node(nodename, pid, sig): - on_down(node) - - def note_waiting(): - left = len(P) - if left: - pids = ', '.join(str(pid) for _, _, pid in P) - self.note(self.colored.blue( - '> Waiting for {0} {1} -> {2}...'.format( - left, pluralize(left, 'node'), pids)), newline=False) - - if retry: - note_waiting() - its = 0 - while P: - for node in P: - its += 1 - self.note('.', newline=False) - nodename, _, pid = node - if not self.node_alive(pid): - self.note('\n\t> {0}: {1}'.format(nodename, self.OK)) - on_down(node) - note_waiting() - break - if P and not its % len(P): - sleep(float(retry)) - self.note('') - - def getpids(self, p, cmd, callback=None): - _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') - - nodes = [] - for node in multi_args(p, cmd): - try: - pidfile_template = _getopt( - p.namespaces[node.namespace], ['--pidfile', '-p'], - ) - except KeyError: - pidfile_template = _getopt(p.options, ['--pidfile', '-p']) - pid = None - pidfile = node.expander(pidfile_template) - try: - pid = Pidfile(pidfile).read_pid() - except ValueError: - pass - if pid: - nodes.append((node.name, tuple(node.argv), pid)) - else: - self.note('> {0.name}: {1}'.format(node, self.DOWN)) - if callback: - callback(node.name, node.argv, pid) - - return nodes - - def kill(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - for nodename, _, pid in self.getpids(p, cmd): - self.note('Killing node {0} ({1})'.format(nodename, pid)) - self.signal_node(nodename, pid, signal.SIGKILL) - - def stop(self, argv, cmd, retry=None, callback=None): - self.splash() - p = NamespacedOptionParser(argv) - return self._stop_nodes(p, cmd, retry=retry, callback=callback) - - def _stop_nodes(self, p, cmd, retry=None, callback=None): - restargs = p.args[len(p.values):] - self.shutdown_nodes(self.getpids(p, cmd, callback=callback), - sig=findsig(restargs), - retry=retry, - callback=callback) - - def restart(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - retvals = [] - - def on_node_shutdown(nodename, argv, pid): - self.note(self.colored.blue( - '> Restarting node {0}: '.format(nodename)), newline=False) - retval = self.waitexec(argv, path=p.options['--executable']) - self.note(retval and self.FAILED or self.OK) - retvals.append(retval) - - self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown) - self.retval = int(any(retvals)) - - def stopwait(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - return self._stop_nodes(p, cmd, retry=2) - stop_verify = stopwait # compat - - def expand(self, argv, cmd=None): - template = argv[0] - p = NamespacedOptionParser(argv[1:]) - for node in multi_args(p, cmd): - self.say(node.expander(template)) - - def help(self, argv, cmd=None): - self.say(__doc__) - - def usage(self): - self.splash() - self.say(USAGE.format(prog_name=self.prog_name)) - - def splash(self): - if not self.nosplash: - c = self.colored - self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER))) - - def waitexec(self, argv, path=sys.executable): - args = ' '.join([path] + list(argv)) - argstr = shlex.split(from_utf8(args), posix=not IS_WINDOWS) - pipe = Popen(argstr, env=self.env) - self.info(' {0}'.format(' '.join(argstr))) - 
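
[Note] `node_alive` above (and the shutdown polling that calls it) leans on the standard `os.kill` probe: signal 0 delivers nothing but still performs the existence and permission checks. The same probe stands alone as:

    import errno
    import os

    def pid_alive(pid):
        """True if a process with this pid exists (mirror of node_alive above)."""
        try:
            os.kill(pid, 0)                # signal 0: probe only, nothing sent
        except OSError as exc:
            if exc.errno == errno.ESRCH:   # no such process
                return False
            raise                          # e.g. EPERM: exists, just not ours
        return True
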
retcode = pipe.wait() - if retcode < 0: - self.note('* Child was terminated by signal {0}'.format(-retcode)) - return -retcode - elif retcode > 0: - self.note('* Child terminated with errorcode {0}'.format(retcode)) - return retcode - - def error(self, msg=None): - if msg: - self.carp(msg) - self.usage() - self.retcode = 1 - return 1 - - def info(self, msg, newline=True): - if self.verbose: - self.note(msg, newline=newline) - - def note(self, msg, newline=True): - if not self.quiet: - self.say(str(msg), newline=newline) - - @cached_property - def colored(self): - return term.colored(enabled=not self.no_color) - - @cached_property - def OK(self): - return str(self.colored.green('OK')) - - @cached_property - def FAILED(self): - return str(self.colored.red('FAILED')) - - @cached_property - def DOWN(self): - return str(self.colored.magenta('DOWN')) - - -def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''): - names = p.values - options = dict(p.options) - passthrough = p.passthrough - ranges = len(names) == 1 - if ranges: - try: - noderange = int(names[0]) - except ValueError: - pass - else: - names = [str(n) for n in range(1, noderange + 1)] - prefix = 'celery' - cmd = options.pop('--cmd', cmd) - append = options.pop('--append', append) - hostname = options.pop('--hostname', - options.pop('-n', socket.gethostname())) - prefix = options.pop('--prefix', prefix) or '' - suffix = options.pop('--suffix', suffix) or hostname - if suffix in ('""', "''"): - suffix = '' - - for ns_name, ns_opts in list(items(p.namespaces)): - if ',' in ns_name or (ranges and '-' in ns_name): - for subns in parse_ns_range(ns_name, ranges): - p.namespaces[subns].update(ns_opts) - p.namespaces.pop(ns_name) - - # Numbers in args always refers to the index in the list of names. - # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). 
- for ns_name, ns_opts in list(items(p.namespaces)): - if ns_name.isdigit(): - ns_index = int(ns_name) - 1 - if ns_index < 0: - raise KeyError('Indexes start at 1 got: %r' % (ns_name, )) - try: - p.namespaces[names[ns_index]].update(ns_opts) - except IndexError: - raise KeyError('No node at index %r' % (ns_name, )) - - for name in names: - this_suffix = suffix - if '@' in name: - this_name = options['-n'] = name - nodename, this_suffix = nodesplit(name) - name = nodename - else: - nodename = '%s%s' % (prefix, name) - this_name = options['-n'] = '%s@%s' % (nodename, this_suffix) - expand = abbreviations({'%h': this_name, - '%n': name, - '%N': nodename, - '%d': this_suffix}) - argv = ([expand(cmd)] + - [format_opt(opt, expand(value)) - for opt, value in items(p.optmerge(name, options))] + - [passthrough]) - if append: - argv.append(expand(append)) - yield multi_args_t(this_name, argv, expand, name) - - -class NamespacedOptionParser(object): - - def __init__(self, args): - self.args = args - self.options = OrderedDict() - self.values = [] - self.passthrough = '' - self.namespaces = defaultdict(lambda: OrderedDict()) - - self.parse() - - def parse(self): - rargs = list(self.args) - pos = 0 - while pos < len(rargs): - arg = rargs[pos] - if arg == '--': - self.passthrough = ' '.join(rargs[pos:]) - break - elif arg[0] == '-': - if arg[1] == '-': - self.process_long_opt(arg[2:]) - else: - value = None - if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-': - value = rargs[pos + 1] - pos += 1 - self.process_short_opt(arg[1:], value) - else: - self.values.append(arg) - pos += 1 - - def process_long_opt(self, arg, value=None): - if '=' in arg: - arg, value = arg.split('=', 1) - self.add_option(arg, value, short=False) - - def process_short_opt(self, arg, value=None): - self.add_option(arg, value, short=True) - - def optmerge(self, ns, defaults=None): - if defaults is None: - defaults = self.options - return OrderedDict(defaults, **self.namespaces[ns]) - - def add_option(self, name, value, short=False, ns=None): - prefix = short and '-' or '--' - dest = self.options - if ':' in name: - name, ns = name.split(':') - dest = self.namespaces[ns] - dest[prefix + name] = value - - -def quote(v): - return "\\'".join("'" + p + "'" for p in v.split("'")) - - -def format_opt(opt, value): - if not value: - return opt - if opt.startswith('--'): - return '{0}={1}'.format(opt, value) - return '{0} {1}'.format(opt, value) - - -def parse_ns_range(ns, ranges=False): - ret = [] - for space in ',' in ns and ns.split(',') or [ns]: - if ranges and '-' in space: - start, stop = space.split('-') - ret.extend( - str(n) for n in range(int(start), int(stop) + 1) - ) - else: - ret.append(space) - return ret - - -def abbreviations(mapping): - - def expand(S): - ret = S - if S is not None: - for short_opt, long_opt in items(mapping): - ret = ret.replace(short_opt, long_opt) - return ret - - return expand - - -def findsig(args, default=signal.SIGTERM): - for arg in reversed(args): - if len(arg) == 2 and arg[0] == '-': - try: - return int(arg[1]) - except ValueError: - pass - if arg[0] == '-': - maybe_sig = 'SIG' + arg[1:] - if maybe_sig in SIGNAMES: - return getattr(signal, maybe_sig) - return default - - -def _getopt(d, alt): - for opt in alt: - try: - return d[opt] - except KeyError: - pass - raise KeyError(alt[0]) - - -def _setdefaultopt(d, alt, value): - for opt in alt[1:]: - try: - return d[opt] - except KeyError: - pass - return d.setdefault(alt[0], value) - - -if __name__ == '__main__': # pragma: no cover - main() diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py b/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py deleted file mode 100644 index dc04075..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery worker` command (previously known as ``celeryd``) - -.. program:: celery worker - -.. seealso:: - - See :ref:`preload-options`. - -.. cmdoption:: -c, --concurrency - - Number of child processes processing the queue. The default - is the number of CPUs available on your system. - -.. cmdoption:: -P, --pool - - Pool implementation: - - prefork (default), eventlet, gevent, solo or threads. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. - -.. cmdoption:: -n, --hostname - - Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname), - %n (name) and %d, (domain). - -.. cmdoption:: -B, --beat - - Also run the `celery beat` periodic task scheduler. Please note that - there must only be one instance of this service. - -.. cmdoption:: -Q, --queues - - List of queues to enable for this worker, separated by comma. - By default all configured queues are enabled. - Example: `-Q video,image` - -.. cmdoption:: -I, --include - - Comma separated list of additional modules to import. - Example: -I foo.tasks,bar.tasks - -.. cmdoption:: -s, --schedule - - Path to the schedule database if running with the `-B` option. - Defaults to `celerybeat-schedule`. The extension ".db" may be - appended to the filename. - -.. cmdoption:: -O - - Apply optimization profile. Supported: default, fair - -.. cmdoption:: --scheduler - - Scheduler class to use. Default is celery.beat.PersistentScheduler - -.. cmdoption:: -S, --statedb - - Path to the state database. The extension '.db' may - be appended to the filename. Default: {default} - -.. cmdoption:: -E, --events - - Send events that can be captured by monitors like :program:`celery events`, - `celerymon`, and others. - -.. cmdoption:: --without-gossip - - Do not subscribe to other workers events. - -.. cmdoption:: --without-mingle - - Do not synchronize with other workers at startup. - -.. cmdoption:: --without-heartbeat - - Do not send event heartbeats. - -.. cmdoption:: --heartbeat-interval - - Interval in seconds at which to send worker heartbeat - -.. cmdoption:: --purge - - Purges all waiting tasks before the daemon is started. - **WARNING**: This is unrecoverable, and the tasks will be - deleted from the messaging server. - -.. cmdoption:: --time-limit - - Enables a hard time limit (in seconds int/float) for tasks. - -.. cmdoption:: --soft-time-limit - - Enables a soft time limit (in seconds int/float) for tasks. - -.. cmdoption:: --maxtasksperchild - - Maximum number of tasks a pool worker can execute before it's - terminated and replaced by a new worker. - -.. cmdoption:: --pidfile - - Optional file used to store the workers pid. - - The worker will not start if this file already exists - and the pid is still alive. - -.. cmdoption:: --autoscale - - Enable autoscaling by providing - max_concurrency, min_concurrency. Example:: - - --autoscale=10,3 - - (always keep 3 processes, but grow to 10 if necessary) - -.. cmdoption:: --autoreload - - Enable autoreloading. - -.. cmdoption:: --no-execv - - Don't do execv after multiprocessing child fork. 
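The cmdoption reference above maps directly onto celery's programmatic entry point. A minimal sketch of launching a worker with several of these options from Python, assuming a celery 3.1-era install where `Celery.worker_main()` treats the first argv element as the program name (the `proj` app and broker URL below are purely illustrative):

    from celery import Celery

    app = Celery('proj', broker='amqp://')

    if __name__ == '__main__':
        # Equivalent to: celery worker -A proj -l info -c 4 -Q video,image
        app.worker_main([
            'worker',                # consumed as the program name
            '--loglevel=info',       # -l
            '--concurrency=4',       # -c
            '--queues=video,image',  # -Q
        ])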
- -""" -from __future__ import absolute_import, unicode_literals - -import sys - -from celery import concurrency -from celery.bin.base import Command, Option, daemon_options -from celery.bin.celeryd_detach import detached_celeryd -from celery.five import string_t -from celery.platforms import maybe_drop_privileges -from celery.utils import default_nodename -from celery.utils.log import LOG_LEVELS, mlevel - -__all__ = ['worker', 'main'] - -__MODULE_DOC__ = __doc__ - - -class worker(Command): - """Start worker instance. - - Examples:: - - celery worker --app=proj -l info - celery worker -A proj -l info -Q hipri,lopri - - celery worker -A proj --concurrency=4 - celery worker -A proj --concurrency=1000 -P eventlet - - celery worker --autoscale=10,0 - """ - doc = __MODULE_DOC__ # parse help from this too - namespace = 'celeryd' - enable_config_from_cmdline = True - supports_args = False - - def run_from_argv(self, prog_name, argv=None, command=None): - command = sys.argv[0] if command is None else command - argv = sys.argv[1:] if argv is None else argv - # parse options before detaching so errors can be handled. - options, args = self.prepare_args( - *self.parse_options(prog_name, argv, command)) - self.maybe_detach([command] + argv) - return self(*args, **options) - - def maybe_detach(self, argv, dopts=['-D', '--detach']): - if any(arg in argv for arg in dopts): - argv = [v for v in argv if v not in dopts] - # will never return - detached_celeryd(self.app).execute_from_commandline(argv) - raise SystemExit(0) - - def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None, - loglevel=None, logfile=None, pidfile=None, state_db=None, - **kwargs): - maybe_drop_privileges(uid=uid, gid=gid) - # Pools like eventlet/gevent needs to patch libs as early - # as possible. - pool_cls = (concurrency.get_implementation(pool_cls) or - self.app.conf.CELERYD_POOL) - if self.app.IS_WINDOWS and kwargs.get('beat'): - self.die('-B option does not work on Windows. ' - 'Please run celery beat as a separate service.') - hostname = self.host_format(default_nodename(hostname)) - if loglevel: - try: - loglevel = mlevel(loglevel) - except KeyError: # pragma: no cover - self.die('Unknown level {0!r}. Please use one of {1}.'.format( - loglevel, '|'.join( - l for l in LOG_LEVELS if isinstance(l, string_t)))) - - return self.app.Worker( - hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, - logfile=logfile, # node format handled by celery.app.log.setup - pidfile=self.node_format(pidfile, hostname), - state_db=self.node_format(state_db, hostname), **kwargs - ).start() - - def with_pool_option(self, argv): - # this command support custom pools - # that may have to be loaded as early as possible. 
- return (['-P'], ['--pool']) - - def get_options(self): - conf = self.app.conf - return ( - Option('-c', '--concurrency', - default=conf.CELERYD_CONCURRENCY, type='int'), - Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'), - Option('--purge', '--discard', default=False, action='store_true'), - Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL), - Option('-n', '--hostname'), - Option('-B', '--beat', action='store_true'), - Option('-s', '--schedule', dest='schedule_filename', - default=conf.CELERYBEAT_SCHEDULE_FILENAME), - Option('--scheduler', dest='scheduler_cls'), - Option('-S', '--statedb', - default=conf.CELERYD_STATE_DB, dest='state_db'), - Option('-E', '--events', default=conf.CELERY_SEND_EVENTS, - action='store_true', dest='send_events'), - Option('--time-limit', type='float', dest='task_time_limit', - default=conf.CELERYD_TASK_TIME_LIMIT), - Option('--soft-time-limit', dest='task_soft_time_limit', - default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'), - Option('--maxtasksperchild', dest='max_tasks_per_child', - default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'), - Option('--queues', '-Q', default=[]), - Option('--exclude-queues', '-X', default=[]), - Option('--include', '-I', default=[]), - Option('--autoscale'), - Option('--autoreload', action='store_true'), - Option('--no-execv', action='store_true', default=False), - Option('--without-gossip', action='store_true', default=False), - Option('--without-mingle', action='store_true', default=False), - Option('--without-heartbeat', action='store_true', default=False), - Option('--heartbeat-interval', type='int'), - Option('-O', dest='optimization'), - Option('-D', '--detach', action='store_true'), - ) + daemon_options() + tuple(self.app.user_options['worker']) - - -def main(app=None): - # Fix for setuptools generated scripts, so that it will - # work with multiprocessing fork emulation. - # (see multiprocessing.forking.get_preparation_data()) - if __name__ != '__main__': # pragma: no cover - sys.modules['__main__'] = sys.modules[__name__] - from billiard import freeze_support - freeze_support() - worker(app=app).execute_from_commandline() - - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py deleted file mode 100644 index 4471a4c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py +++ /dev/null @@ -1,422 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.bootsteps - ~~~~~~~~~~~~~~~~ - - A directed acyclic graph of reusable components. 
- -""" -from __future__ import absolute_import, unicode_literals - -from collections import deque -from threading import Event - -from kombu.common import ignore_errors -from kombu.utils import symbol_by_name - -from .datastructures import DependencyGraph, GraphFormatter -from .five import values, with_metaclass -from .utils.imports import instantiate, qualname -from .utils.log import get_logger - -try: - from greenlet import GreenletExit - IGNORE_ERRORS = (GreenletExit, ) -except ImportError: # pragma: no cover - IGNORE_ERRORS = () - -__all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep'] - -#: States -RUN = 0x1 -CLOSE = 0x2 -TERMINATE = 0x3 - -logger = get_logger(__name__) -debug = logger.debug - - -def _pre(ns, fmt): - return '| {0}: {1}'.format(ns.alias, fmt) - - -def _label(s): - return s.name.rsplit('.', 1)[-1] - - -class StepFormatter(GraphFormatter): - """Graph formatter for :class:`Blueprint`.""" - - blueprint_prefix = '⧉' - conditional_prefix = '∘' - blueprint_scheme = { - 'shape': 'parallelogram', - 'color': 'slategray4', - 'fillcolor': 'slategray3', - } - - def label(self, step): - return step and '{0}{1}'.format( - self._get_prefix(step), - (step.label or _label(step)).encode('utf-8', 'ignore'), - ) - - def _get_prefix(self, step): - if step.last: - return self.blueprint_prefix - if step.conditional: - return self.conditional_prefix - return '' - - def node(self, obj, **attrs): - scheme = self.blueprint_scheme if obj.last else self.node_scheme - return self.draw_node(obj, scheme, attrs) - - def edge(self, a, b, **attrs): - if a.last: - attrs.update(arrowhead='none', color='darkseagreen3') - return self.draw_edge(a, b, self.edge_scheme, attrs) - - -class Blueprint(object): - """Blueprint containing bootsteps that can be applied to objects. - - :keyword steps: List of steps. - :keyword name: Set explicit name for this blueprint. - :keyword app: Set the Celery app for this blueprint. - :keyword on_start: Optional callback applied after blueprint start. - :keyword on_close: Optional callback applied before blueprint close. - :keyword on_stopped: Optional callback applied after blueprint stopped. 
- - """ - GraphFormatter = StepFormatter - - name = None - state = None - started = 0 - default_steps = set() - state_to_name = { - 0: 'initializing', - RUN: 'running', - CLOSE: 'closing', - TERMINATE: 'terminating', - } - - def __init__(self, steps=None, name=None, app=None, - on_start=None, on_close=None, on_stopped=None): - self.app = app - self.name = name or self.name or qualname(type(self)) - self.types = set(steps or []) | set(self.default_steps) - self.on_start = on_start - self.on_close = on_close - self.on_stopped = on_stopped - self.shutdown_complete = Event() - self.steps = {} - - def start(self, parent): - self.state = RUN - if self.on_start: - self.on_start() - for i, step in enumerate(s for s in parent.steps if s is not None): - self._debug('Starting %s', step.alias) - self.started = i + 1 - step.start(parent) - debug('^-- substep ok') - - def human_state(self): - return self.state_to_name[self.state or 0] - - def info(self, parent): - info = {} - for step in parent.steps: - info.update(step.info(parent) or {}) - return info - - def close(self, parent): - if self.on_close: - self.on_close() - self.send_all(parent, 'close', 'closing', reverse=False) - - def restart(self, parent, method='stop', - description='restarting', propagate=False): - self.send_all(parent, method, description, propagate=propagate) - - def send_all(self, parent, method, - description=None, reverse=True, propagate=True, args=()): - description = description or method.replace('_', ' ') - steps = reversed(parent.steps) if reverse else parent.steps - for step in steps: - if step: - fun = getattr(step, method, None) - if fun is not None: - self._debug('%s %s...', - description.capitalize(), step.alias) - try: - fun(parent, *args) - except Exception as exc: - if propagate: - raise - logger.error( - 'Error on %s %s: %r', - description, step.alias, exc, exc_info=1, - ) - - def stop(self, parent, close=True, terminate=False): - what = 'terminating' if terminate else 'stopping' - if self.state in (CLOSE, TERMINATE): - return - - if self.state != RUN or self.started != len(parent.steps): - # Not fully started, can safely exit. - self.state = TERMINATE - self.shutdown_complete.set() - return - self.close(parent) - self.state = CLOSE - - self.restart( - parent, 'terminate' if terminate else 'stop', - description=what, propagate=False, - ) - - if self.on_stopped: - self.on_stopped() - self.state = TERMINATE - self.shutdown_complete.set() - - def join(self, timeout=None): - try: - # Will only get here if running green, - # makes sure all greenthreads have exited. - self.shutdown_complete.wait(timeout=timeout) - except IGNORE_ERRORS: - pass - - def apply(self, parent, **kwargs): - """Apply the steps in this blueprint to an object. - - This will apply the ``__init__`` and ``include`` methods - of each step, with the object as argument:: - - step = Step(obj) - ... - step.include(obj) - - For :class:`StartStopStep` the services created - will also be added to the objects ``steps`` attribute. 
- - """ - self._debug('Preparing bootsteps.') - order = self.order = [] - steps = self.steps = self.claim_steps() - - self._debug('Building graph...') - for S in self._finalize_steps(steps): - step = S(parent, **kwargs) - steps[step.name] = step - order.append(step) - self._debug('New boot order: {%s}', - ', '.join(s.alias for s in self.order)) - for step in order: - step.include(parent) - return self - - def connect_with(self, other): - self.graph.adjacent.update(other.graph.adjacent) - self.graph.add_edge(type(other.order[0]), type(self.order[-1])) - - def __getitem__(self, name): - return self.steps[name] - - def _find_last(self): - return next((C for C in values(self.steps) if C.last), None) - - def _firstpass(self, steps): - for step in values(steps): - step.requires = [symbol_by_name(dep) for dep in step.requires] - stream = deque(step.requires for step in values(steps)) - while stream: - for node in stream.popleft(): - node = symbol_by_name(node) - if node.name not in self.steps: - steps[node.name] = node - stream.append(node.requires) - - def _finalize_steps(self, steps): - last = self._find_last() - self._firstpass(steps) - it = ((C, C.requires) for C in values(steps)) - G = self.graph = DependencyGraph( - it, formatter=self.GraphFormatter(root=last), - ) - if last: - for obj in G: - if obj != last: - G.add_edge(last, obj) - try: - return G.topsort() - except KeyError as exc: - raise KeyError('unknown bootstep: %s' % exc) - - def claim_steps(self): - return dict(self.load_step(step) for step in self._all_steps()) - - def _all_steps(self): - return self.types | self.app.steps[self.name.lower()] - - def load_step(self, step): - step = symbol_by_name(step) - return step.name, step - - def _debug(self, msg, *args): - return debug(_pre(self, msg), *args) - - @property - def alias(self): - return _label(self) - - -class StepType(type): - """Metaclass for steps.""" - - def __new__(cls, name, bases, attrs): - module = attrs.get('__module__') - qname = '{0}.{1}'.format(module, name) if module else name - attrs.update( - __qualname__=qname, - name=attrs.get('name') or qname, - ) - return super(StepType, cls).__new__(cls, name, bases, attrs) - - def __str__(self): - return self.name - - def __repr__(self): - return 'step:{0.name}{{{0.requires!r}}}'.format(self) - - -@with_metaclass(StepType) -class Step(object): - """A Bootstep. - - The :meth:`__init__` method is called when the step - is bound to a parent object, and can as such be used - to initialize attributes in the parent object at - parent instantiation-time. - - """ - - #: Optional step name, will use qualname if not specified. - name = None - - #: Optional short name used for graph outputs and in logs. - label = None - - #: Set this to true if the step is enabled based on some condition. - conditional = False - - #: List of other steps that that must be started before this step. - #: Note that all dependencies must be in the same blueprint. - requires = () - - #: This flag is reserved for the workers Consumer, - #: since it is required to always be started last. - #: There can only be one object marked last - #: in every blueprint. - last = False - - #: This provides the default for :meth:`include_if`. 
- enabled = True - - def __init__(self, parent, **kwargs): - pass - - def include_if(self, parent): - """An optional predicate that decides whether this - step should be created.""" - return self.enabled - - def instantiate(self, name, *args, **kwargs): - return instantiate(name, *args, **kwargs) - - def _should_include(self, parent): - if self.include_if(parent): - return True, self.create(parent) - return False, None - - def include(self, parent): - return self._should_include(parent)[0] - - def create(self, parent): - """Create the step.""" - pass - - def __repr__(self): - return ''.format(self) - - @property - def alias(self): - return self.label or _label(self) - - def info(self, obj): - pass - - -class StartStopStep(Step): - - #: Optional obj created by the :meth:`create` method. - #: This is used by :class:`StartStopStep` to keep the - #: original service object. - obj = None - - def start(self, parent): - if self.obj: - return self.obj.start() - - def stop(self, parent): - if self.obj: - return self.obj.stop() - - def close(self, parent): - pass - - def terminate(self, parent): - if self.obj: - return getattr(self.obj, 'terminate', self.obj.stop)() - - def include(self, parent): - inc, ret = self._should_include(parent) - if inc: - self.obj = ret - parent.steps.append(self) - return inc - - -class ConsumerStep(StartStopStep): - requires = ('celery.worker.consumer:Connection', ) - consumers = None - - def get_consumers(self, channel): - raise NotImplementedError('missing get_consumers') - - def start(self, c): - channel = c.connection.channel() - self.consumers = self.get_consumers(channel) - for consumer in self.consumers or []: - consumer.consume() - - def stop(self, c): - self._close(c, True) - - def shutdown(self, c): - self._close(c, False) - - def _close(self, c, cancel_consumers=True): - channels = set() - for consumer in self.consumers or []: - if cancel_consumers: - ignore_errors(c.connection, consumer.cancel) - if consumer.channel: - channels.add(consumer.channel) - for channel in channels: - ignore_errors(c.connection, channel.close) diff --git a/thesisenv/lib/python3.6/site-packages/celery/canvas.py b/thesisenv/lib/python3.6/site-packages/celery/canvas.py deleted file mode 100644 index 4149e39..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/canvas.py +++ /dev/null @@ -1,698 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.canvas - ~~~~~~~~~~~~~ - - Composing task workflows. - - Documentation for some of these types are in :mod:`celery`. - You should import these from :mod:`celery` and not this module. - - -""" -from __future__ import absolute_import - -from collections import MutableSequence -from copy import deepcopy -from functools import partial as _partial, reduce -from operator import itemgetter -from itertools import chain as _chain - -from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid - -from celery._state import current_app -from celery.utils.functional import ( - maybe_list, is_list, regen, - chunks as _chunks, -) -from celery.utils.text import truncate - -__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks', - 'group', 'chord', 'signature', 'maybe_signature'] - - -class _getitem_property(object): - """Attribute -> dict key descriptor. - - The target object must support ``__getitem__``, - and optionally ``__setitem__``. - - Example: - - >>> from collections import defaultdict - - >>> class Me(dict): - ... deep = defaultdict(dict) - ... - ... foo = _getitem_property('foo') - ... 
deep_thing = _getitem_property('deep.thing') - - - >>> me = Me() - >>> me.foo - None - - >>> me.foo = 10 - >>> me.foo - 10 - >>> me['foo'] - 10 - - >>> me.deep_thing = 42 - >>> me.deep_thing - 42 - >>> me.deep - defaultdict(, {'thing': 42}) - - """ - - def __init__(self, keypath): - path, _, self.key = keypath.rpartition('.') - self.path = path.split('.') if path else None - - def _path(self, obj): - return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path - else obj) - - def __get__(self, obj, type=None): - if obj is None: - return type - return self._path(obj).get(self.key) - - def __set__(self, obj, value): - self._path(obj)[self.key] = value - - -def maybe_unroll_group(g): - """Unroll group with only one member.""" - # Issue #1656 - try: - size = len(g.tasks) - except TypeError: - try: - size = g.tasks.__length_hint__() - except (AttributeError, TypeError): - pass - else: - return list(g.tasks)[0] if size == 1 else g - else: - return g.tasks[0] if size == 1 else g - - -def _upgrade(fields, sig): - """Used by custom signatures in .from_dict, to keep common fields.""" - sig.update(chord_size=fields.get('chord_size')) - return sig - - -class Signature(dict): - """Class that wraps the arguments and execution options - for a single task invocation. - - Used as the parts in a :class:`group` and other constructs, - or to pass tasks around as callbacks while being compatible - with serializers with a strict type subset. - - :param task: Either a task class/instance, or the name of a task. - :keyword args: Positional arguments to apply. - :keyword kwargs: Keyword arguments to apply. - :keyword options: Additional options to :meth:`Task.apply_async`. - - Note that if the first argument is a :class:`dict`, the other - arguments will be ignored and the values in the dict will be used - instead. - - >>> s = signature('tasks.add', args=(2, 2)) - >>> signature(s) - {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} - - """ - TYPES = {} - _app = _type = None - - @classmethod - def register_type(cls, subclass, name=None): - cls.TYPES[name or subclass.__name__] = subclass - return subclass - - @classmethod - def from_dict(self, d, app=None): - typ = d.get('subtask_type') - if typ: - return self.TYPES[typ].from_dict(kwdict(d), app=app) - return Signature(d, app=app) - - def __init__(self, task=None, args=None, kwargs=None, options=None, - type=None, subtask_type=None, immutable=False, - app=None, **ex): - self._app = app - init = dict.__init__ - - if isinstance(task, dict): - return init(self, task) # works like dict(d) - - # Also supports using task class/instance instead of string name. - try: - task_name = task.name - except AttributeError: - task_name = task - else: - self._type = task - - init(self, - task=task_name, args=tuple(args or ()), - kwargs=kwargs or {}, - options=dict(options or {}, **ex), - subtask_type=subtask_type, - immutable=immutable, - chord_size=None) - - def __call__(self, *partial_args, **partial_kwargs): - args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) - return self.type(*args, **kwargs) - - def delay(self, *partial_args, **partial_kwargs): - return self.apply_async(partial_args, partial_kwargs) - - def apply(self, args=(), kwargs={}, **options): - """Apply this task locally.""" - # For callbacks: extra args are prepended to the stored args. 
- args, kwargs, options = self._merge(args, kwargs, options) - return self.type.apply(args, kwargs, **options) - - def _merge(self, args=(), kwargs={}, options={}): - if self.immutable: - return (self.args, self.kwargs, - dict(self.options, **options) if options else self.options) - return (tuple(args) + tuple(self.args) if args else self.args, - dict(self.kwargs, **kwargs) if kwargs else self.kwargs, - dict(self.options, **options) if options else self.options) - - def clone(self, args=(), kwargs={}, app=None, **opts): - # need to deepcopy options so origins links etc. is not modified. - if args or kwargs or opts: - args, kwargs, opts = self._merge(args, kwargs, opts) - else: - args, kwargs, opts = self.args, self.kwargs, self.options - s = Signature.from_dict({'task': self.task, 'args': tuple(args), - 'kwargs': kwargs, 'options': deepcopy(opts), - 'subtask_type': self.subtask_type, - 'chord_size': self.chord_size, - 'immutable': self.immutable}, - app=app or self._app) - s._type = self._type - return s - partial = clone - - def freeze(self, _id=None, group_id=None, chord=None): - opts = self.options - try: - tid = opts['task_id'] - except KeyError: - tid = opts['task_id'] = _id or uuid() - if 'reply_to' not in opts: - opts['reply_to'] = self.app.oid - if group_id: - opts['group_id'] = group_id - if chord: - opts['chord'] = chord - return self.app.AsyncResult(tid) - _freeze = freeze - - def replace(self, args=None, kwargs=None, options=None): - s = self.clone() - if args is not None: - s.args = args - if kwargs is not None: - s.kwargs = kwargs - if options is not None: - s.options = options - return s - - def set(self, immutable=None, **options): - if immutable is not None: - self.set_immutable(immutable) - self.options.update(options) - return self - - def set_immutable(self, immutable): - self.immutable = immutable - - def apply_async(self, args=(), kwargs={}, **options): - try: - _apply = self._apply_async - except IndexError: # no tasks for chain, etc to find type - return - # For callbacks: extra args are prepended to the stored args. 
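The `_merge()` logic above is what implements partial and immutable signatures. A short sketch with a hypothetical `add` task (celery 3.1-era API and broker/backend URLs assumed):

    from celery import Celery

    app = Celery(broker='amqp://', backend='rpc://')

    @app.task
    def add(x, y):
        return x + y

    # Partial signature: per _merge(), call-time args are prepended to
    # the stored ones, so this executes add(8, 2).
    add.s(2).delay(8)

    # Immutable signature: _merge() returns the stored args unchanged,
    # ignoring anything passed at call time, so this executes add(2, 2).
    add.si(2, 2).delay(8)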
- if args or kwargs or options: - args, kwargs, options = self._merge(args, kwargs, options) - else: - args, kwargs, options = self.args, self.kwargs, self.options - return _apply(args, kwargs, **options) - - def append_to_list_option(self, key, value): - items = self.options.setdefault(key, []) - if not isinstance(items, MutableSequence): - items = self.options[key] = [items] - if value not in items: - items.append(value) - return value - - def link(self, callback): - return self.append_to_list_option('link', callback) - - def link_error(self, errback): - return self.append_to_list_option('link_error', errback) - - def flatten_links(self): - return list(_chain.from_iterable(_chain( - [[self]], - (link.flatten_links() - for link in maybe_list(self.options.get('link')) or []) - ))) - - def __or__(self, other): - if isinstance(other, group): - other = maybe_unroll_group(other) - if not isinstance(self, chain) and isinstance(other, chain): - return chain((self, ) + other.tasks, app=self._app) - elif isinstance(other, chain): - return chain(*self.tasks + other.tasks, app=self._app) - elif isinstance(other, Signature): - if isinstance(self, chain): - return chain(*self.tasks + (other, ), app=self._app) - return chain(self, other, app=self._app) - return NotImplemented - - def __deepcopy__(self, memo): - memo[id(self)] = self - return dict(self) - - def __invert__(self): - return self.apply_async().get() - - def __reduce__(self): - # for serialization, the task type is lazily loaded, - # and not stored in the dict itself. - return subtask, (dict(self), ) - - def reprcall(self, *args, **kwargs): - args, kwargs, _ = self._merge(args, kwargs, {}) - return reprcall(self['task'], args, kwargs) - - def election(self): - type = self.type - app = type.app - tid = self.options.get('task_id') or uuid() - - with app.producer_or_acquire(None) as P: - props = type.backend.on_task_call(P, tid) - app.control.election(tid, 'task', self.clone(task_id=tid, **props), - connection=P.connection) - return type.AsyncResult(tid) - - def __repr__(self): - return self.reprcall() - - @cached_property - def type(self): - return self._type or self.app.tasks[self['task']] - - @cached_property - def app(self): - return self._app or current_app - - @cached_property - def AsyncResult(self): - try: - return self.type.AsyncResult - except KeyError: # task not registered - return self.app.AsyncResult - - @cached_property - def _apply_async(self): - try: - return self.type.apply_async - except KeyError: - return _partial(self.app.send_task, self['task']) - id = _getitem_property('options.task_id') - task = _getitem_property('task') - args = _getitem_property('args') - kwargs = _getitem_property('kwargs') - options = _getitem_property('options') - subtask_type = _getitem_property('subtask_type') - chord_size = _getitem_property('chord_size') - immutable = _getitem_property('immutable') - - -@Signature.register_type -class chain(Signature): - - def __init__(self, *tasks, **options): - tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0]) - else tasks) - Signature.__init__( - self, 'celery.chain', (), {'tasks': tasks}, **options - ) - self.tasks = tasks - self.subtask_type = 'chain' - - def __call__(self, *args, **kwargs): - if self.tasks: - return self.apply_async(args, kwargs) - - @classmethod - def from_dict(self, d, app=None): - tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] - if d['args'] and tasks: - # partial args passed on to first task in chain (Issue #1057). 
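The `__or__` overload above is the machinery behind the `|` chaining syntax. A sketch, again with a hypothetical `add` task and illustrative broker/backend URLs:

    from celery import Celery

    app = Celery(broker='amqp://', backend='rpc://')

    @app.task
    def add(x, y):
        return x + y

    # add.s(2, 2) | add.s(4) builds chain(add.s(2, 2), add.s(4)); each
    # task's return value is prepended to the next signature's args.
    res = (add.s(2, 2) | add.s(4)).apply_async()
    print(res.get())  # -> 8, assuming the result backend is reachable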
- tasks[0]['args'] = tasks[0]._merge(d['args'])[0] - return _upgrade(d, chain(*tasks, app=app, **d['options'])) - - @property - def type(self): - try: - return self._type or self.tasks[0].type.app.tasks['celery.chain'] - except KeyError: - return self.app.tasks['celery.chain'] - - def __repr__(self): - return ' | '.join(repr(t) for t in self.tasks) - - -class _basemap(Signature): - _task_name = None - _unpack_args = itemgetter('task', 'it') - - def __init__(self, task, it, **options): - Signature.__init__( - self, self._task_name, (), - {'task': task, 'it': regen(it)}, immutable=True, **options - ) - - def apply_async(self, args=(), kwargs={}, **opts): - # need to evaluate generators - task, it = self._unpack_args(self.kwargs) - return self.type.apply_async( - (), {'task': task, 'it': list(it)}, **opts - ) - - @classmethod - def from_dict(cls, d, app=None): - return _upgrade( - d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']), - ) - - -@Signature.register_type -class xmap(_basemap): - _task_name = 'celery.map' - - def __repr__(self): - task, it = self._unpack_args(self.kwargs) - return '[{0}(x) for x in {1}]'.format(task.task, - truncate(repr(it), 100)) - - -@Signature.register_type -class xstarmap(_basemap): - _task_name = 'celery.starmap' - - def __repr__(self): - task, it = self._unpack_args(self.kwargs) - return '[{0}(*x) for x in {1}]'.format(task.task, - truncate(repr(it), 100)) - - -@Signature.register_type -class chunks(Signature): - _unpack_args = itemgetter('task', 'it', 'n') - - def __init__(self, task, it, n, **options): - Signature.__init__( - self, 'celery.chunks', (), - {'task': task, 'it': regen(it), 'n': n}, - immutable=True, **options - ) - - @classmethod - def from_dict(self, d, app=None): - return _upgrade( - d, chunks(*self._unpack_args( - d['kwargs']), app=app, **d['options']), - ) - - def apply_async(self, args=(), kwargs={}, **opts): - return self.group().apply_async(args, kwargs, **opts) - - def __call__(self, **options): - return self.group()(**options) - - def group(self): - # need to evaluate generators - task, it, n = self._unpack_args(self.kwargs) - return group((xstarmap(task, part, app=self._app) - for part in _chunks(iter(it), n)), - app=self._app) - - @classmethod - def apply_chunks(cls, task, it, n, app=None): - return cls(task, it, n, app=app)() - - -def _maybe_group(tasks): - if isinstance(tasks, group): - tasks = list(tasks.tasks) - elif isinstance(tasks, Signature): - tasks = [tasks] - else: - tasks = regen(tasks) - return tasks - - -def _maybe_clone(tasks, app): - return [s.clone() if isinstance(s, Signature) else signature(s, app=app) - for s in tasks] - - -@Signature.register_type -class group(Signature): - - def __init__(self, *tasks, **options): - if len(tasks) == 1: - tasks = _maybe_group(tasks[0]) - Signature.__init__( - self, 'celery.group', (), {'tasks': tasks}, **options - ) - self.tasks, self.subtask_type = tasks, 'group' - - @classmethod - def from_dict(self, d, app=None): - tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] - if d['args'] and tasks: - # partial args passed on to all tasks in the group (Issue #1057). 
- for task in tasks: - task['args'] = task._merge(d['args'])[0] - return _upgrade(d, group(tasks, app=app, **kwdict(d['options']))) - - def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options): - tasks = _maybe_clone(self.tasks, app=self._app) - if not tasks: - return self.freeze() - type = self.type - return type(*type.prepare(dict(self.options, **options), tasks, args), - add_to_parent=add_to_parent) - - def set_immutable(self, immutable): - for task in self.tasks: - task.set_immutable(immutable) - - def link(self, sig): - # Simply link to first task - sig = sig.clone().set(immutable=True) - return self.tasks[0].link(sig) - - def link_error(self, sig): - sig = sig.clone().set(immutable=True) - return self.tasks[0].link_error(sig) - - def apply(self, *args, **kwargs): - if not self.tasks: - return self.freeze() # empty group returns GroupResult - return Signature.apply(self, *args, **kwargs) - - def __call__(self, *partial_args, **options): - return self.apply_async(partial_args, **options) - - def freeze(self, _id=None, group_id=None, chord=None): - opts = self.options - try: - gid = opts['task_id'] - except KeyError: - gid = opts['task_id'] = uuid() - if group_id: - opts['group_id'] = group_id - if chord: - opts['chord'] = group_id - new_tasks, results = [], [] - for task in self.tasks: - task = maybe_signature(task, app=self._app).clone() - results.append(task.freeze(group_id=group_id, chord=chord)) - new_tasks.append(task) - self.tasks = self.kwargs['tasks'] = new_tasks - return self.app.GroupResult(gid, results) - _freeze = freeze - - def skew(self, start=1.0, stop=None, step=1.0): - it = fxrange(start, stop, step, repeatlast=True) - for task in self.tasks: - task.set(countdown=next(it)) - return self - - def __iter__(self): - return iter(self.tasks) - - def __repr__(self): - return repr(self.tasks) - - @property - def app(self): - return self._app or (self.tasks[0].app if self.tasks else current_app) - - @property - def type(self): - if self._type: - return self._type - # taking the app from the first task in the list, there may be a - # better solution for this, e.g. to consolidate tasks with the same - # app and apply them in batches. - return self.app.tasks[self['task']] - - -@Signature.register_type -class chord(Signature): - - def __init__(self, header, body=None, task='celery.chord', - args=(), kwargs={}, **options): - Signature.__init__( - self, task, args, - dict(kwargs, header=_maybe_group(header), - body=maybe_signature(body, app=self._app)), **options - ) - self.subtask_type = 'chord' - - def apply(self, args=(), kwargs={}, **options): - # For callbacks: extra args are prepended to the stored args. - args, kwargs, options = self._merge(args, kwargs, options) - return self.type.apply(args, kwargs, **options) - - def freeze(self, _id=None, group_id=None, chord=None): - return self.body.freeze(_id, group_id=group_id, chord=chord) - - @classmethod - def from_dict(self, d, app=None): - args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs'])) - return _upgrade(d, self(*args, app=app, **kwdict(d))) - - @staticmethod - def _unpack_args(header=None, body=None, **kwargs): - # Python signatures are better at extracting keys from dicts - # than manually popping things off. 
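Taken together, `group` fans signatures out in parallel while `chord` feeds a header group's results into a body callback. A sketch under the same assumptions as the previous examples (hypothetical tasks, celery 3.1-era API):

    from celery import Celery, chord, group

    app = Celery(broker='amqp://', backend='rpc://')

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # group.apply_async() clones each signature and returns the
    # GroupResult produced by group.freeze() above.
    print(group(add.s(i, i) for i in range(10))().get())  # [0, 2, ..., 18]

    # chord runs the header group first, then passes the ordered list
    # of results to the body (tsum here).
    print(chord((add.s(i, i) for i in range(10)), tsum.s())().get())  # 90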
- return (header, body), kwargs - - @property - def app(self): - # we will be able to fix this mess in 3.2 when we no longer - # require an actual task implementation for chord/group - if self._app: - return self._app - app = None if self.body is None else self.body.app - if app is None: - try: - app = self.tasks[0].app - except IndexError: - app = None - return app if app is not None else current_app - - @property - def type(self): - if self._type: - return self._type - return self.app.tasks['celery.chord'] - - def delay(self, *partial_args, **partial_kwargs): - # There's no partial_kwargs for chord. - return self.apply_async(partial_args) - - def apply_async(self, args=(), kwargs={}, task_id=None, - producer=None, publisher=None, connection=None, - router=None, result_cls=None, **options): - args = (tuple(args) + tuple(self.args) - if args and not self.immutable else self.args) - body = kwargs.get('body') or self.kwargs['body'] - kwargs = dict(self.kwargs, **kwargs) - body = body.clone(**options) - - _chord = self.type - if _chord.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, task_id=task_id, **options) - res = body.freeze(task_id) - parent = _chord(self.tasks, body, args, **options) - res.parent = parent - return res - - def __call__(self, body=None, **options): - return self.apply_async( - (), {'body': body} if body else {}, **options) - - def clone(self, *args, **kwargs): - s = Signature.clone(self, *args, **kwargs) - # need to make copy of body - try: - s.kwargs['body'] = s.kwargs['body'].clone() - except (AttributeError, KeyError): - pass - return s - - def link(self, callback): - self.body.link(callback) - return callback - - def link_error(self, errback): - self.body.link_error(errback) - return errback - - def set_immutable(self, immutable): - # changes mutability of header only, not callback. - for task in self.tasks: - task.set_immutable(immutable) - - def __repr__(self): - if self.body: - return self.body.reprcall(self.tasks) - return ''.format(self) - - tasks = _getitem_property('kwargs.header') - body = _getitem_property('kwargs.body') - - -def signature(varies, args=(), kwargs={}, options={}, app=None, **kw): - if isinstance(varies, dict): - if isinstance(varies, Signature): - return varies.clone(app=app) - return Signature.from_dict(varies, app=app) - return Signature(varies, args, kwargs, options, app=app, **kw) -subtask = signature # XXX compat - - -def maybe_signature(d, app=None): - if d is not None: - if isinstance(d, dict): - if not isinstance(d, Signature): - return signature(d, app=app) - elif isinstance(d, list): - return [maybe_signature(s, app=app) for s in d] - if app is not None: - d._app = app - return d -maybe_subtask = maybe_signature # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py deleted file mode 100644 index c58fdbc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency - ~~~~~~~~~~~~~~~~~~ - - Pool implementation abstract factory, and alias definitions. - -""" -from __future__ import absolute_import - -# Import from kombu directly as it's used -# early in the import stage, where celery.utils loads -# too much (e.g. 
for eventlet patching) -from kombu.utils import symbol_by_name - -__all__ = ['get_implementation'] - -ALIASES = { - 'prefork': 'celery.concurrency.prefork:TaskPool', - 'eventlet': 'celery.concurrency.eventlet:TaskPool', - 'gevent': 'celery.concurrency.gevent:TaskPool', - 'threads': 'celery.concurrency.threads:TaskPool', - 'solo': 'celery.concurrency.solo:TaskPool', - 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias -} - - -def get_implementation(cls): - return symbol_by_name(cls, ALIASES) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py deleted file mode 100644 index bc29d9c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py +++ /dev/null @@ -1,1270 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.asynpool - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - .. note:: - - This module will be moved soon, so don't use it directly. - - Non-blocking version of :class:`multiprocessing.Pool`. - - This code deals with three major challenges: - - 1) Starting up child processes and keeping them running. - 2) Sending jobs to the processes and receiving results back. - 3) Safely shutting down this system. - -""" -from __future__ import absolute_import - -import errno -import gc -import os -import select -import socket -import struct -import sys -import time - -from collections import deque, namedtuple -from io import BytesIO -from pickle import HIGHEST_PROTOCOL -from time import sleep -from weakref import WeakValueDictionary, ref - -from amqp.utils import promise -from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined -from billiard import pool as _pool -from billiard.compat import buf_t, setblocking, isblocking -from billiard.einfo import ExceptionInfo -from billiard.queues import _SimpleQueue -from kombu.async import READ, WRITE, ERR -from kombu.serialization import pickle as _pickle -from kombu.utils import fxrange -from kombu.utils.compat import get_errno -from kombu.utils.eventio import SELECT_BAD_FD -from celery.five import Counter, items, string_t, text_t, values -from celery.utils.log import get_logger -from celery.utils.text import truncate -from celery.worker import state as worker_state - -try: - from _billiard import read as __read__ - from struct import unpack_from as _unpack_from - memoryview = memoryview - readcanbuf = True - - if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6): - - def unpack_from(fmt, view, _unpack_from=_unpack_from): # noqa - return _unpack_from(fmt, view.tobytes()) # <- memoryview - else: - # unpack_from supports memoryview in 2.7.6 and 3.3+ - unpack_from = _unpack_from # noqa - -except (ImportError, NameError): # pragma: no cover - - def __read__(fd, buf, size, read=os.read): # noqa - chunk = read(fd, size) - n = len(chunk) - if n != 0: - buf.write(chunk) - return n - readcanbuf = False # noqa - - def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa - return unpack(fmt, iobuf.getvalue()) # <-- BytesIO - - -logger = get_logger(__name__) -error, debug = logger.error, logger.debug - -UNAVAIL = frozenset([errno.EAGAIN, errno.EINTR]) - -#: Constant sent by child process when started (ready to accept work) -WORKER_UP = 15 - -#: A process must have started before this timeout (in secs.) expires. 
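The `ALIASES` table above is the whole pool-selection mechanism: `get_implementation()` resolves either an alias or a fully qualified name through `symbol_by_name()`. A sketch, assuming the celery 3.1-era package this diff removes is importable:

    from celery.concurrency import get_implementation

    # Both forms resolve to the same class object.
    pool_cls = get_implementation('prefork')
    assert pool_cls is get_implementation(
        'celery.concurrency.prefork:TaskPool')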
-PROC_ALIVE_TIMEOUT = 4.0 - -SCHED_STRATEGY_PREFETCH = 1 -SCHED_STRATEGY_FAIR = 4 - -SCHED_STRATEGIES = { - None: SCHED_STRATEGY_PREFETCH, - 'fair': SCHED_STRATEGY_FAIR, -} - -RESULT_MAXLEN = 128 - -Ack = namedtuple('Ack', ('id', 'fd', 'payload')) - - -def gen_not_started(gen): - # gi_frame is None when generator stopped. - return gen.gi_frame and gen.gi_frame.f_lasti == -1 - - -def _get_job_writer(job): - try: - writer = job._writer - except AttributeError: - pass - else: - return writer() # is a weakref - - -def _select(readers=None, writers=None, err=None, timeout=0): - """Simple wrapper to :class:`~select.select`. - - :param readers: Set of reader fds to test if readable. - :param writers: Set of writer fds to test if writable. - :param err: Set of fds to test for error condition. - - All fd sets passed must be mutable as this function - will remove non-working fds from them, this also means - the caller must make sure there are still fds in the sets - before calling us again. - - :returns: tuple of ``(readable, writable, again)``, where - ``readable`` is a set of fds that have data available for read, - ``writable`` is a set of fds that is ready to be written to - and ``again`` is a flag that if set means the caller must - throw away the result and call us again. - - """ - readers = set() if readers is None else readers - writers = set() if writers is None else writers - err = set() if err is None else err - try: - r, w, e = select.select(readers, writers, err, timeout) - if e: - r = list(set(r) | set(e)) - return r, w, 0 - except (select.error, socket.error) as exc: - if get_errno(exc) == errno.EINTR: - return [], [], 1 - elif get_errno(exc) in SELECT_BAD_FD: - for fd in readers | writers | err: - try: - select.select([fd], [], [], 0) - except (select.error, socket.error) as exc: - if get_errno(exc) not in SELECT_BAD_FD: - raise - readers.discard(fd) - writers.discard(fd) - err.discard(fd) - return [], [], 1 - else: - raise - - -def _repr_result(obj): - try: - return repr(obj) - except Exception as orig_exc: - try: - return text_t(obj) - except UnicodeDecodeError: - if isinstance(obj, string_t): - try: - return obj.decode('utf-8', errors='replace') - except Exception: - pass - return ''.format( - orig_exc, - ) - - -class Worker(_pool.Worker): - """Pool worker process.""" - dead = False - - def on_loop_start(self, pid): - # our version sends a WORKER_UP message when the process is ready - # to accept work, this will tell the parent that the inqueue fd - # is writable. 
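`_select()` above wraps `select.select()` with EINTR and bad-fd handling and returns `(readable, writable, again)`. A sketch of that contract on a local socket pair (Unix-style fds assumed; `_select` is module-internal, so this import only works against the 3.1-era tree being deleted here):

    import socket

    from celery.concurrency.asynpool import _select

    a, b = socket.socketpair()
    b.send(b'ping')

    readers = {a.fileno()}  # must be mutable: bad fds are pruned in place
    readable, writable, again = _select(readers, None, set(), timeout=0)
    # A truthy `again` means the fd sets were mutated mid-call and the
    # caller must discard this result and call _select() again.
    assert not again and a.fileno() in readable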
- self.outq.put((WORKER_UP, (pid, ))) - - def prepare_result(self, result, maxlen=RESULT_MAXLEN, truncate=truncate): - if not isinstance(result, ExceptionInfo): - return truncate(_repr_result(result), maxlen) - return result - - -class ResultHandler(_pool.ResultHandler): - """Handles messages from the pool processes.""" - - def __init__(self, *args, **kwargs): - self.fileno_to_outq = kwargs.pop('fileno_to_outq') - self.on_process_alive = kwargs.pop('on_process_alive') - super(ResultHandler, self).__init__(*args, **kwargs) - # add our custom message handler - self.state_handlers[WORKER_UP] = self.on_process_alive - - def _recv_message(self, add_reader, fd, callback, - __read__=__read__, readcanbuf=readcanbuf, - BytesIO=BytesIO, unpack_from=unpack_from, - load=_pickle.load): - Hr = Br = 0 - if readcanbuf: - buf = bytearray(4) - bufv = memoryview(buf) - else: - buf = bufv = BytesIO() - # header - - while Hr < 4: - try: - n = __read__( - fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, - ) - except OSError as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - else: - if n == 0: - raise (OSError('End of file during message') if Hr - else EOFError()) - Hr += n - - body_size, = unpack_from('>i', bufv) - if readcanbuf: - buf = bytearray(body_size) - bufv = memoryview(buf) - else: - buf = bufv = BytesIO() - - while Br < body_size: - try: - n = __read__( - fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, - ) - except OSError as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - else: - if n == 0: - raise (OSError('End of file during message') if Br - else EOFError()) - Br += n - add_reader(fd, self.handle_event, fd) - if readcanbuf: - message = load(BytesIO(bufv)) - else: - bufv.seek(0) - message = load(bufv) - if message: - callback(message) - - def _make_process_result(self, hub): - """Coroutine that reads messages from the pool processes - and calls the appropriate handler.""" - fileno_to_outq = self.fileno_to_outq - on_state_change = self.on_state_change - add_reader = hub.add_reader - remove_reader = hub.remove_reader - recv_message = self._recv_message - - def on_result_readable(fileno): - try: - fileno_to_outq[fileno] - except KeyError: # process gone - return remove_reader(fileno) - it = recv_message(add_reader, fileno, on_state_change) - try: - next(it) - except StopIteration: - pass - except (IOError, OSError, EOFError): - remove_reader(fileno) - else: - add_reader(fileno, it) - return on_result_readable - - def register_with_event_loop(self, hub): - self.handle_event = self._make_process_result(hub) - - def handle_event(self, fileno): - raise RuntimeError('Not registered with event loop') - - def on_stop_not_started(self): - """This method is always used to stop when the helper thread is not - started.""" - cache = self.cache - check_timeouts = self.check_timeouts - fileno_to_outq = self.fileno_to_outq - on_state_change = self.on_state_change - join_exited_workers = self.join_exited_workers - - # flush the processes outqueues until they have all terminated. - outqueues = set(fileno_to_outq) - while cache and outqueues and self._state != TERMINATE: - if check_timeouts is not None: - # make sure tasks with a time limit will time out. 
- check_timeouts() - # cannot iterate and remove at the same time - pending_remove_fd = set() - for fd in outqueues: - self._flush_outqueue( - fd, pending_remove_fd.discard, fileno_to_outq, - on_state_change, - ) - try: - join_exited_workers(shutdown=True) - except WorkersJoined: - return debug('result handler: all workers terminated') - outqueues.difference_update(pending_remove_fd) - - def _flush_outqueue(self, fd, remove, process_index, on_state_change): - try: - proc = process_index[fd] - except KeyError: - # process already found terminated - # which means its outqueue has already been processed - # by the worker lost handler. - return remove(fd) - - reader = proc.outq._reader - try: - setblocking(reader, 1) - except (OSError, IOError): - return remove(fd) - try: - if reader.poll(0): - task = reader.recv() - else: - task = None - sleep(0.5) - except (IOError, EOFError): - return remove(fd) - else: - if task: - on_state_change(task) - finally: - try: - setblocking(reader, 0) - except (OSError, IOError): - return remove(fd) - - -class AsynPool(_pool.Pool): - """Pool version that uses AIO instead of helper threads.""" - ResultHandler = ResultHandler - Worker = Worker - - def __init__(self, processes=None, synack=False, - sched_strategy=None, *args, **kwargs): - self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, - sched_strategy) - processes = self.cpu_count() if processes is None else processes - self.synack = synack - # create queue-pairs for all our processes in advance. - self._queues = dict((self.create_process_queues(), None) - for _ in range(processes)) - - # inqueue fileno -> process mapping - self._fileno_to_inq = {} - # outqueue fileno -> process mapping - self._fileno_to_outq = {} - # synqueue fileno -> process mapping - self._fileno_to_synq = {} - - # We keep track of processes that have not yet - # sent a WORKER_UP message. If a process fails to send - # this message within proc_up_timeout we terminate it - # and hope the next process will recover. - self._proc_alive_timeout = PROC_ALIVE_TIMEOUT - self._waiting_to_start = set() - - # denormalized set of all inqueues. - self._all_inqueues = set() - - # Set of fds being written to (busy) - self._active_writes = set() - - # Set of active co-routines currently writing jobs. - self._active_writers = set() - - # Set of fds that are busy (executing task) - self._busy_workers = set() - self._mark_worker_as_available = self._busy_workers.discard - - # Holds jobs waiting to be written to child processes. - self.outbound_buffer = deque() - - self.write_stats = Counter() - - super(AsynPool, self).__init__(processes, *args, **kwargs) - - for proc in self._pool: - # create initial mappings, these will be updated - # as processes are recycled, or found lost elsewhere. - self._fileno_to_outq[proc.outqR_fd] = proc - self._fileno_to_synq[proc.synqW_fd] = proc - self.on_soft_timeout = self.on_hard_timeout = None - if self._timeout_handler: - self.on_soft_timeout = self._timeout_handler.on_soft_timeout - self.on_hard_timeout = self._timeout_handler.on_hard_timeout - - def _create_worker_process(self, i): - gc.collect() # Issue #2927 - return super(AsynPool, self)._create_worker_process(i) - - def _event_process_exit(self, hub, proc): - # This method is called whenever the process sentinel is readable. 
- self._untrack_child_process(proc, hub) - self.maintain_pool() - - def _track_child_process(self, proc, hub): - try: - fd = proc._sentinel_poll - except AttributeError: - # we need to duplicate the fd here to carefully - # control when the fd is removed from the process table, - # as once the original fd is closed we cannot unregister - # the fd from epoll(7) anymore, causing a 100% CPU poll loop. - fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) - hub.add_reader(fd, self._event_process_exit, hub, proc) - - def _untrack_child_process(self, proc, hub): - if proc._sentinel_poll is not None: - fd, proc._sentinel_poll = proc._sentinel_poll, None - hub.remove(fd) - os.close(fd) - - def register_with_event_loop(self, hub): - """Registers the async pool with the current event loop.""" - self._result_handler.register_with_event_loop(hub) - self.handle_result_event = self._result_handler.handle_event - self._create_timelimit_handlers(hub) - self._create_process_handlers(hub) - self._create_write_handlers(hub) - - # Add handler for when a process exits (calls maintain_pool) - [self._track_child_process(w, hub) for w in self._pool] - # Handle_result_event is called whenever one of the - # result queues are readable. - [hub.add_reader(fd, self.handle_result_event, fd) - for fd in self._fileno_to_outq] - - # Timers include calling maintain_pool at a regular interval - # to be certain processes are restarted. - for handler, interval in items(self.timers): - hub.call_repeatedly(interval, handler) - - hub.on_tick.add(self.on_poll_start) - - def _create_timelimit_handlers(self, hub, now=time.time): - """For async pool this sets up the handlers used - to implement time limits.""" - call_later = hub.call_later - trefs = self._tref_for_id = WeakValueDictionary() - - def on_timeout_set(R, soft, hard): - if soft: - trefs[R._job] = call_later( - soft, self._on_soft_timeout, R._job, soft, hard, hub, - ) - elif hard: - trefs[R._job] = call_later( - hard, self._on_hard_timeout, R._job, - ) - self.on_timeout_set = on_timeout_set - - def _discard_tref(job): - try: - tref = trefs.pop(job) - tref.cancel() - del(tref) - except (KeyError, AttributeError): - pass # out of scope - self._discard_tref = _discard_tref - - def on_timeout_cancel(R): - _discard_tref(R._job) - self.on_timeout_cancel = on_timeout_cancel - - def _on_soft_timeout(self, job, soft, hard, hub, now=time.time): - # only used by async pool. - if hard: - self._tref_for_id[job] = hub.call_at( - now() + (hard - soft), self._on_hard_timeout, job, - ) - try: - result = self._cache[job] - except KeyError: - pass # job ready - else: - self.on_soft_timeout(result) - finally: - if not hard: - # remove tref - self._discard_tref(job) - - def _on_hard_timeout(self, job): - # only used by async pool. 
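These handlers are what back the task-level `soft_time_limit`/`time_limit` options. A sketch of how a task typically consumes them (the `expensive()` and `cleanup()` helpers are hypothetical):

    from celery import Celery
    from celery.exceptions import SoftTimeLimitExceeded

    app = Celery(broker='amqp://')

    @app.task(soft_time_limit=10, time_limit=20)
    def crunch(data):
        try:
            return expensive(data)   # hypothetical long-running work
        except SoftTimeLimitExceeded:
            cleanup()                # hypothetical resource cleanup
            raise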
- try: - result = self._cache[job] - except KeyError: - pass # job ready - else: - self.on_hard_timeout(result) - finally: - # remove tref - self._discard_tref(job) - - def on_job_ready(self, job, i, obj, inqW_fd): - self._mark_worker_as_available(inqW_fd) - - def _create_process_handlers(self, hub, READ=READ, ERR=ERR): - """For async pool this will create the handlers called - when a process is up/down and etc.""" - add_reader, remove_reader, remove_writer = ( - hub.add_reader, hub.remove_reader, hub.remove_writer, - ) - cache = self._cache - all_inqueues = self._all_inqueues - fileno_to_inq = self._fileno_to_inq - fileno_to_outq = self._fileno_to_outq - fileno_to_synq = self._fileno_to_synq - busy_workers = self._busy_workers - handle_result_event = self.handle_result_event - process_flush_queues = self.process_flush_queues - waiting_to_start = self._waiting_to_start - - def verify_process_alive(proc): - proc = proc() # is a weakref - if (proc is not None and proc._is_alive() and - proc in waiting_to_start): - assert proc.outqR_fd in fileno_to_outq - assert fileno_to_outq[proc.outqR_fd] is proc - assert proc.outqR_fd in hub.readers - error('Timed out waiting for UP message from %r', proc) - os.kill(proc.pid, 9) - - def on_process_up(proc): - """Called when a process has started.""" - # If we got the same fd as a previous process then we will also - # receive jobs in the old buffer, so we need to reset the - # job._write_to and job._scheduled_for attributes used to recover - # message boundaries when processes exit. - infd = proc.inqW_fd - for job in values(cache): - if job._write_to and job._write_to.inqW_fd == infd: - job._write_to = proc - if job._scheduled_for and job._scheduled_for.inqW_fd == infd: - job._scheduled_for = proc - fileno_to_outq[proc.outqR_fd] = proc - - # maintain_pool is called whenever a process exits. - self._track_child_process(proc, hub) - - assert not isblocking(proc.outq._reader) - - # handle_result_event is called when the processes outqueue is - # readable. - add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) - - waiting_to_start.add(proc) - hub.call_later( - self._proc_alive_timeout, verify_process_alive, ref(proc), - ) - - self.on_process_up = on_process_up - - def _remove_from_index(obj, proc, index, remove_fun, callback=None): - # this remove the file descriptors for a process from - # the indices. we have to make sure we don't overwrite - # another processes fds, as the fds may be reused. - try: - fd = obj.fileno() - except (IOError, OSError): - return - - try: - if index[fd] is proc: - # fd has not been reused so we can remove it from index. 
-                    index.pop(fd, None)
-            except KeyError:
-                pass
-            else:
-                remove_fun(fd)
-                if callback is not None:
-                    callback(fd)
-            return fd
-
-        def on_process_down(proc):
-            """Called when a worker process exits."""
-            if getattr(proc, 'dead', None):
-                return
-            process_flush_queues(proc)
-            _remove_from_index(
-                proc.outq._reader, proc, fileno_to_outq, remove_reader,
-            )
-            if proc.synq:
-                _remove_from_index(
-                    proc.synq._writer, proc, fileno_to_synq, remove_writer,
-                )
-            inq = _remove_from_index(
-                proc.inq._writer, proc, fileno_to_inq, remove_writer,
-                callback=all_inqueues.discard,
-            )
-            if inq:
-                busy_workers.discard(inq)
-            self._untrack_child_process(proc, hub)
-            waiting_to_start.discard(proc)
-            self._active_writes.discard(proc.inqW_fd)
-            remove_writer(proc.inq._writer)
-            remove_reader(proc.outq._reader)
-            if proc.synqR_fd:
-                remove_reader(proc.synq._reader)
-            if proc.synqW_fd:
-                self._active_writes.discard(proc.synqW_fd)
-                remove_reader(proc.synq._writer)
-        self.on_process_down = on_process_down
-
-    def _create_write_handlers(self, hub,
-                               pack=struct.pack, dumps=_pickle.dumps,
-                               protocol=HIGHEST_PROTOCOL):
-        """For async pool this creates the handlers used to write data to
-        child processes."""
-        fileno_to_inq = self._fileno_to_inq
-        fileno_to_synq = self._fileno_to_synq
-        outbound = self.outbound_buffer
-        pop_message = outbound.popleft
-        append_message = outbound.append
-        put_back_message = outbound.appendleft
-        all_inqueues = self._all_inqueues
-        active_writes = self._active_writes
-        active_writers = self._active_writers
-        busy_workers = self._busy_workers
-        diff = all_inqueues.difference
-        add_writer = hub.add_writer
-        hub_add, hub_remove = hub.add, hub.remove
-        mark_write_fd_as_active = active_writes.add
-        mark_write_gen_as_active = active_writers.add
-        mark_worker_as_busy = busy_workers.add
-        write_generator_done = active_writers.discard
-        get_job = self._cache.__getitem__
-        write_stats = self.write_stats
-        is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR
-        revoked_tasks = worker_state.revoked
-        getpid = os.getpid
-
-        precalc = {ACK: self._create_payload(ACK, (0, )),
-                   NACK: self._create_payload(NACK, (0, ))}
-
-        def _put_back(job, _time=time.time):
-            # puts the job back at the front of the queue (messages are
-            # consumed with popleft, so appendleft makes it next in line)
-            if job._terminated is not None or \
-                    job.correlation_id in revoked_tasks:
-                if not job._accepted:
-                    job._ack(None, _time(), getpid(), None)
-                job._set_terminated(job._terminated)
-            else:
-                # XXX linear lookup, should find a better way,
-                # but this happens rarely and is here to protect against races.
-                if job not in outbound:
-                    outbound.appendleft(job)
-        self._put_back = _put_back
-
-        # called for every event loop iteration, and if there
-        # are messages pending this will schedule writing one message
-        # by registering the 'schedule_writes' function for all currently
-        # inactive inqueues (not already being written to)
-
-        # consolidate means the event loop will merge them
-        # and call the callback once with the list of writable fds as
-        # argument.  Using this means we minimize the risk of having
-        # the same fd receive every task if the pipe read buffer is not
-        # full.
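The consolidate comment above benefits from a concrete sketch. Below is a minimal illustration using a plain ``select`` loop instead of kombu's hub; ``poll_consolidated`` and its parameters are illustrative names, not part of the real API (only the single-callback-with-a-list behaviour is taken from the code here):

.. code-block:: python

    import select

    def poll_consolidated(registered_fds, consolidate_callback, timeout=0.1):
        # fds registered with consolidate=True are collected into a single
        # callback invocation with the list of writable fds as argument,
        # rather than one callback per fd.
        _, writable, _ = select.select([], registered_fds, [], timeout)
        if writable:
            consolidate_callback(writable)  # e.g. schedule_writes(ready_fds)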
- if is_fair_strategy: - - def on_poll_start(): - if outbound and len(busy_workers) < len(all_inqueues): - inactive = diff(active_writes) - [hub_add(fd, None, WRITE | ERR, consolidate=True) - for fd in inactive] - else: - [hub_remove(fd) for fd in diff(active_writes)] - else: - def on_poll_start(): # noqa - if outbound: - [hub_add(fd, None, WRITE | ERR, consolidate=True) - for fd in diff(active_writes)] - else: - [hub_remove(fd) for fd in diff(active_writes)] - self.on_poll_start = on_poll_start - - def on_inqueue_close(fd, proc): - # Makes sure the fd is removed from tracking when - # the connection is closed, this is essential as fds may be reused. - busy_workers.discard(fd) - try: - if fileno_to_inq[fd] is proc: - fileno_to_inq.pop(fd, None) - active_writes.discard(fd) - all_inqueues.discard(fd) - hub_remove(fd) - except KeyError: - pass - self.on_inqueue_close = on_inqueue_close - - def schedule_writes(ready_fds, curindex=[0]): - # Schedule write operation to ready file descriptor. - # The file descriptor is writeable, but that does not - # mean the process is currently reading from the socket. - # The socket is buffered so writeable simply means that - # the buffer can accept at least 1 byte of data. - - # This means we have to cycle between the ready fds. - # the first version used shuffle, but using i % total - # is about 30% faster with many processes. The latter - # also shows more fairness in write stats when used with - # many processes [XXX On OS X, this may vary depending - # on event loop implementation (i.e select vs epoll), so - # have to test further] - total = len(ready_fds) - - for i in range(total): - ready_fd = ready_fds[curindex[0] % total] - if ready_fd in active_writes: - # already writing to this fd - curindex[0] += 1 - continue - if is_fair_strategy and ready_fd in busy_workers: - # worker is already busy with another task - curindex[0] += 1 - continue - if ready_fd not in all_inqueues: - hub_remove(ready_fd) - curindex[0] += 1 - continue - try: - job = pop_message() - except IndexError: - # no more messages, remove all inactive fds from the hub. - # this is important since the fds are always writeable - # as long as there's 1 byte left in the buffer, and so - # this may create a spinloop where the event loop - # always wakes up. - for inqfd in diff(active_writes): - hub_remove(inqfd) - break - else: - if not job._accepted: # job not accepted by another worker - try: - # keep track of what process the write operation - # was scheduled for. - proc = job._scheduled_for = fileno_to_inq[ready_fd] - except KeyError: - # write was scheduled for this fd but the process - # has since exited and the message must be sent to - # another process. - put_back_message(job) - curindex[0] += 1 - continue - cor = _write_job(proc, ready_fd, job) - job._writer = ref(cor) - mark_write_gen_as_active(cor) - mark_write_fd_as_active(ready_fd) - mark_worker_as_busy(ready_fd) - - # Try to write immediately, in case there's an error. - try: - next(cor) - except StopIteration: - pass - except OSError as exc: - if get_errno(exc) != errno.EBADF: - raise - else: - add_writer(ready_fd, cor) - curindex[0] += 1 - hub.consolidate_callback = schedule_writes - - def send_job(tup): - # Schedule writing job request for when one of the process - # inqueues are writable. - body = dumps(tup, protocol=protocol) - body_size = len(body) - header = pack('>I', body_size) - # index 1,0 is the job ID. 
- job = get_job(tup[1][0]) - job._payload = buf_t(header), buf_t(body), body_size - append_message(job) - self._quick_put = send_job - - def on_not_recovering(proc, fd, job, exc): - error('Process inqueue damaged: %r %r: %r', - proc, proc.exitcode, exc, exc_info=1) - if proc._is_alive(): - proc.terminate() - hub.remove(fd) - self._put_back(job) - - def _write_job(proc, fd, job): - # writes job to the worker process. - # Operation must complete if more than one byte of data - # was written. If the broker connection is lost - # and no data was written the operation shall be canceled. - header, body, body_size = job._payload - errors = 0 - try: - # job result keeps track of what process the job is sent to. - job._write_to = proc - send = proc.send_job_offset - - Hw = Bw = 0 - # write header - while Hw < 4: - try: - Hw += send(header, Hw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - errors += 1 - if errors > 100: - on_not_recovering(proc, fd, job, exc) - raise StopIteration() - yield - else: - errors = 0 - - # write body - while Bw < body_size: - try: - Bw += send(body, Bw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - errors += 1 - if errors > 100: - on_not_recovering(proc, fd, job, exc) - raise StopIteration() - yield - else: - errors = 0 - finally: - hub_remove(fd) - write_stats[proc.index] += 1 - # message written, so this fd is now available - active_writes.discard(fd) - write_generator_done(job._writer()) # is a weakref - - def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR): - # Only used when synack is enabled. - # Schedule writing ack response for when the fd is writeable. - msg = Ack(job, fd, precalc[response]) - callback = promise(write_generator_done) - cor = _write_ack(fd, msg, callback=callback) - mark_write_gen_as_active(cor) - mark_write_fd_as_active(fd) - callback.args = (cor, ) - add_writer(fd, cor) - self.send_ack = send_ack - - def _write_ack(fd, ack, callback=None): - # writes ack back to the worker if synack enabled. - # this operation *MUST* complete, otherwise - # the worker process will hang waiting for the ack. - header, body, body_size = ack[2] - try: - try: - proc = fileno_to_synq[fd] - except KeyError: - # process died, we can safely discard the ack at this - # point. - raise StopIteration() - send = proc.send_syn_offset - - Hw = Bw = 0 - # write header - while Hw < 4: - try: - Hw += send(header, Hw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - - # write body - while Bw < body_size: - try: - Bw += send(body, Bw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - yield - finally: - if callback: - callback() - # message written, so this fd is now available - active_writes.discard(fd) - - def flush(self): - if self._state == TERMINATE: - return - # cancel all tasks that have not been accepted so that NACK is sent. - for job in values(self._cache): - if not job._accepted: - job._cancel() - - # clear the outgoing buffer as the tasks will be redelivered by - # the broker anyway. - if self.outbound_buffer: - self.outbound_buffer.clear() - - self.maintain_pool() - - try: - # ...but we must continue writing the payloads we already started - # to keep message boundaries. - # The messages may be NACK'ed later if synack is enabled. 
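For reference, the frame layout produced by ``send_job`` above (and written incrementally by ``_write_job``) is a 4-byte big-endian size header followed by the pickled body; this is what makes the message boundaries recoverable. A self-contained sketch of the same framing, using plain ``pickle``/``struct`` rather than billiard pipes:

.. code-block:: python

    import pickle
    import struct

    body = pickle.dumps(('hello', 123), protocol=pickle.HIGHEST_PROTOCOL)
    frame = struct.pack('>I', len(body)) + body  # header + payload

    # Reader side: the first 4 bytes recover the message boundary.
    size, = struct.unpack('>I', frame[:4])
    assert pickle.loads(frame[4:4 + size]) == ('hello', 123)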
-            if self._state == RUN:
-                # flush outgoing buffers
-                intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True)
-                owned_by = {}
-                for job in values(self._cache):
-                    writer = _get_job_writer(job)
-                    if writer is not None:
-                        owned_by[writer] = job
-
-                while self._active_writers:
-                    writers = list(self._active_writers)
-                    for gen in writers:
-                        if (gen.__name__ == '_write_job' and
-                                gen_not_started(gen)):
-                            # has not started writing the job so can
-                            # discard the task, but we must also remove
-                            # it from the Pool._cache.
-                            try:
-                                job = owned_by[gen]
-                            except KeyError:
-                                pass
-                            else:
-                                # removes from Pool._cache
-                                job.discard()
-                            self._active_writers.discard(gen)
-                        else:
-                            try:
-                                job = owned_by[gen]
-                            except KeyError:
-                                pass
-                            else:
-                                job_proc = job._write_to
-                                if job_proc._is_alive():
-                                    self._flush_writer(job_proc, gen)
-                    # workers may have exited in the meantime.
-                    self.maintain_pool()
-                    sleep(next(intervals))  # don't busyloop
-        finally:
-            self.outbound_buffer.clear()
-            self._active_writers.clear()
-            self._active_writes.clear()
-            self._busy_workers.clear()
-
-    def _flush_writer(self, proc, writer):
-        fds = set([proc.inq._writer])
-        try:
-            while fds:
-                if not proc._is_alive():
-                    break  # process exited
-                readable, writable, again = _select(
-                    writers=fds, err=fds, timeout=0.5,
-                )
-                if not again and (writable or readable):
-                    try:
-                        next(writer)
-                    except (StopIteration, OSError, IOError, EOFError):
-                        break
-        finally:
-            self._active_writers.discard(writer)
-
-    def get_process_queues(self):
-        """Get queues for a new process.
-
-        Here we will find an unused slot, as there should always
-        be one available when we start a new process.
-        """
-        return next(q for q, owner in items(self._queues)
-                    if owner is None)
-
-    def on_grow(self, n):
-        """Grow the pool by ``n`` processes."""
-        diff = max(self._processes - len(self._queues), 0)
-        if diff:
-            self._queues.update(
-                dict((self.create_process_queues(), None) for _ in range(diff))
-            )
-
-    def on_shrink(self, n):
-        """Shrink the pool by ``n`` processes."""
-        pass
-
-    def create_process_queues(self):
-        """Creates new in, out (and optionally syn) queues,
-        returned as a tuple."""
-        # NOTE: Pipes must be set O_NONBLOCK at creation time (the original
-        # fd), otherwise it will not be possible to change the flags until
-        # there is an actual reader/writer on the other side.
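The NOTE above is why the queues below are created with ``rnonblock``/``wnonblock``. For illustration only, the equivalent flag manipulation on a raw ``os.pipe()`` with the standard library (a sketch of the idea on Unix, not what billiard's ``_SimpleQueue`` does internally):

.. code-block:: python

    import fcntl
    import os

    r, w = os.pipe()
    # Set O_NONBLOCK on the original write-end fd at creation time,
    # before any reader/writer exists on the other side.
    flags = fcntl.fcntl(w, fcntl.F_GETFL)
    fcntl.fcntl(w, fcntl.F_SETFL, flags | os.O_NONBLOCK)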
- inq = _SimpleQueue(wnonblock=True) - outq = _SimpleQueue(rnonblock=True) - synq = None - assert isblocking(inq._reader) - assert not isblocking(inq._writer) - assert not isblocking(outq._reader) - assert isblocking(outq._writer) - if self.synack: - synq = _SimpleQueue(wnonblock=True) - assert isblocking(synq._reader) - assert not isblocking(synq._writer) - return inq, outq, synq - - def on_process_alive(self, pid): - """Handler called when the :const:`WORKER_UP` message is received - from a child process, which marks the process as ready - to receive work.""" - try: - proc = next(w for w in self._pool if w.pid == pid) - except StopIteration: - return logger.warning('process with pid=%s already exited', pid) - assert proc.inqW_fd not in self._fileno_to_inq - assert proc.inqW_fd not in self._all_inqueues - self._waiting_to_start.discard(proc) - self._fileno_to_inq[proc.inqW_fd] = proc - self._fileno_to_synq[proc.synqW_fd] = proc - self._all_inqueues.add(proc.inqW_fd) - - def on_job_process_down(self, job, pid_gone): - """Handler called for each job when the process it was assigned to - exits.""" - if job._write_to and not job._write_to._is_alive(): - # job was partially written - self.on_partial_read(job, job._write_to) - elif job._scheduled_for and not job._scheduled_for._is_alive(): - # job was only scheduled to be written to this process, - # but no data was sent so put it back on the outbound_buffer. - self._put_back(job) - - def on_job_process_lost(self, job, pid, exitcode): - """Handler called for each *started* job when the process it - was assigned to exited by mysterious means (error exitcodes and - signals)""" - self.mark_as_worker_lost(job, exitcode) - - def human_write_stats(self): - if self.write_stats is None: - return 'N/A' - vals = list(values(self.write_stats)) - total = sum(vals) - - def per(v, total): - return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0) - - return { - 'total': total, - 'avg': per(total / len(self.write_stats) if total else 0, total), - 'all': ', '.join(per(v, total) for v in vals), - 'raw': ', '.join(map(str, vals)), - 'inqueues': { - 'total': len(self._all_inqueues), - 'active': len(self._active_writes), - } - } - - def _process_cleanup_queues(self, proc): - """Handler called to clean up a processes queues after process - exit.""" - if not proc.dead: - try: - self._queues[self._find_worker_queues(proc)] = None - except (KeyError, ValueError): - pass - - @staticmethod - def _stop_task_handler(task_handler): - """Called at shutdown to tell processes that we are shutting down.""" - for proc in task_handler.pool: - try: - setblocking(proc.inq._writer, 1) - except (OSError, IOError): - pass - else: - try: - proc.inq.put(None) - except OSError as exc: - if get_errno(exc) != errno.EBADF: - raise - - def create_result_handler(self): - return super(AsynPool, self).create_result_handler( - fileno_to_outq=self._fileno_to_outq, - on_process_alive=self.on_process_alive, - ) - - def _process_register_queues(self, proc, queues): - """Marks new ownership for ``queues`` so that the fileno indices are - updated.""" - assert queues in self._queues - b = len(self._queues) - self._queues[queues] = proc - assert b == len(self._queues) - - def _find_worker_queues(self, proc): - """Find the queues owned by ``proc``.""" - try: - return next(q for q, owner in items(self._queues) - if owner == proc) - except StopIteration: - raise ValueError(proc) - - def _setup_queues(self): - # this is only used by the original pool which uses a shared - # queue for all processes. 
-
-        # these attributes make no sense for us, but we will still
-        # have to initialize them.
-        self._inqueue = self._outqueue = \
-            self._quick_put = self._quick_get = self._poll_result = None
-
-    def process_flush_queues(self, proc):
-        """Flushes all queues, including the outbound buffer, so that
-        all tasks that have not been started will be discarded.
-
-        In Celery this is called whenever the transport connection is lost
-        (consumer restart).
-
-        """
-        resq = proc.outq._reader
-        on_state_change = self._result_handler.on_state_change
-        fds = set([resq])
-        while fds and not resq.closed and self._state != TERMINATE:
-            readable, _, again = _select(fds, None, fds, timeout=0.01)
-            if readable:
-                try:
-                    task = resq.recv()
-                except (OSError, IOError, EOFError) as exc:
-                    if get_errno(exc) == errno.EINTR:
-                        continue
-                    elif get_errno(exc) == errno.EAGAIN:
-                        break
-                    else:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                    if get_errno(exc) not in UNAVAIL:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                    break
-                else:
-                    if task is None:
-                        debug('got sentinel while flushing process %r', proc)
-                        break
-                    else:
-                        on_state_change(task)
-            else:
-                break
-
-    def on_partial_read(self, job, proc):
-        """Called when a job was only partially written to a child process
-        and it exited."""
-        # worker terminated by signal:
-        # we cannot reuse the sockets, because we don't know if
-        # the process wrote/read anything from them, and if so we cannot
-        # restore the message boundaries.
-        if not job._accepted:
-            # job was not acked, so find another worker to send it to.
-            self._put_back(job)
-        writer = _get_job_writer(job)
-        if writer:
-            self._active_writers.discard(writer)
-            del(writer)
-
-        if not proc.dead:
-            proc.dead = True
-            # Replace queues to avoid reuse
-            before = len(self._queues)
-            try:
-                queues = self._find_worker_queues(proc)
-                if self.destroy_queues(queues, proc):
-                    self._queues[self.create_process_queues()] = None
-            except ValueError:
-                pass
-            assert len(self._queues) == before
-
-    def destroy_queues(self, queues, proc):
-        """Destroy queues that can no longer be used, so that they
-        can be replaced by new sockets."""
-        assert not proc._is_alive()
-        self._waiting_to_start.discard(proc)
-        removed = 1
-        try:
-            self._queues.pop(queues)
-        except KeyError:
-            removed = 0
-        try:
-            self.on_inqueue_close(queues[0]._writer.fileno(), proc)
-        except IOError:
-            pass
-        for queue in queues:
-            if queue:
-                for sock in (queue._reader, queue._writer):
-                    if not sock.closed:
-                        try:
-                            sock.close()
-                        except (IOError, OSError):
-                            pass
-        return removed
-
-    def _create_payload(self, type_, args,
-                        dumps=_pickle.dumps, pack=struct.pack,
-                        protocol=HIGHEST_PROTOCOL):
-        body = dumps((type_, args), protocol=protocol)
-        size = len(body)
-        header = pack('>I', size)
-        return header, body, size
-
-    @classmethod
-    def _set_result_sentinel(cls, _outqueue, _pool):
-        # unused
-        pass
-
-    def _help_stuff_finish_args(self):
-        # Pool._help_stuff_finished is a classmethod so we have to use this
-        # trick to modify the arguments passed to it.
- return (self._pool, ) - - @classmethod - def _help_stuff_finish(cls, pool): - debug( - 'removing tasks from inqueue until task handler finished', - ) - fileno_to_proc = {} - inqR = set() - for w in pool: - try: - fd = w.inq._reader.fileno() - inqR.add(fd) - fileno_to_proc[fd] = w - except IOError: - pass - while inqR: - readable, _, again = _select(inqR, timeout=0.5) - if again: - continue - if not readable: - break - for fd in readable: - fileno_to_proc[fd].inq._reader.recv() - sleep(0) - - @property - def timers(self): - return {self.maintain_pool: 5.0} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py deleted file mode 100644 index 29c348d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.base - ~~~~~~~~~~~~~~~~~~~~~~~ - - TaskPool interface. - -""" -from __future__ import absolute_import - -import logging -import os -import sys - -from billiard.einfo import ExceptionInfo -from billiard.exceptions import WorkerLostError -from kombu.utils.encoding import safe_repr - -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.five import monotonic, reraise -from celery.utils import timer2 -from celery.utils.text import truncate -from celery.utils.log import get_logger - -__all__ = ['BasePool', 'apply_target'] - -logger = get_logger('celery.pool') - - -def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None, getpid=os.getpid, - propagate=(), monotonic=monotonic, **_): - if accept_callback: - accept_callback(pid or getpid(), monotonic()) - try: - ret = target(*args, **kwargs) - except propagate: - raise - except Exception: - raise - except (WorkerShutdown, WorkerTerminate): - raise - except BaseException as exc: - try: - reraise(WorkerLostError, WorkerLostError(repr(exc)), - sys.exc_info()[2]) - except WorkerLostError: - callback(ExceptionInfo()) - else: - callback(ret) - - -class BasePool(object): - RUN = 0x1 - CLOSE = 0x2 - TERMINATE = 0x3 - - Timer = timer2.Timer - - #: set to true if the pool can be shutdown from within - #: a signal handler. - signal_safe = True - - #: set to true if pool uses greenlets. 
- is_green = False - - _state = None - _pool = None - - #: only used by multiprocessing pool - uses_semaphore = False - - task_join_will_block = True - - def __init__(self, limit=None, putlocks=True, - forking_enable=True, callbacks_propagate=(), **options): - self.limit = limit - self.putlocks = putlocks - self.options = options - self.forking_enable = forking_enable - self.callbacks_propagate = callbacks_propagate - self._does_debug = logger.isEnabledFor(logging.DEBUG) - - def on_start(self): - pass - - def did_start_ok(self): - return True - - def flush(self): - pass - - def on_stop(self): - pass - - def register_with_event_loop(self, loop): - pass - - def on_apply(self, *args, **kwargs): - pass - - def on_terminate(self): - pass - - def on_soft_timeout(self, job): - pass - - def on_hard_timeout(self, job): - pass - - def maintain_pool(self, *args, **kwargs): - pass - - def terminate_job(self, pid, signal=None): - raise NotImplementedError( - '{0} does not implement kill_job'.format(type(self))) - - def restart(self): - raise NotImplementedError( - '{0} does not implement restart'.format(type(self))) - - def stop(self): - self.on_stop() - self._state = self.TERMINATE - - def terminate(self): - self._state = self.TERMINATE - self.on_terminate() - - def start(self): - self.on_start() - self._state = self.RUN - - def close(self): - self._state = self.CLOSE - self.on_close() - - def on_close(self): - pass - - def apply_async(self, target, args=[], kwargs={}, **options): - """Equivalent of the :func:`apply` built-in function. - - Callbacks should optimally return as soon as possible since - otherwise the thread which handles the result will get blocked. - - """ - if self._does_debug: - logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', - target, truncate(safe_repr(args), 1024), - truncate(safe_repr(kwargs), 1024)) - - return self.on_apply(target, args, kwargs, - waitforslot=self.putlocks, - callbacks_propagate=self.callbacks_propagate, - **options) - - def _get_info(self): - return {} - - @property - def info(self): - return self._get_info() - - @property - def active(self): - return self._state == self.RUN - - @property - def num_processes(self): - return self.limit diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py deleted file mode 100644 index 3ae4549..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.eventlet - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Eventlet pool implementation. - -""" -from __future__ import absolute_import - -import sys - -from time import time - -__all__ = ['TaskPool'] - -W_RACE = """\ -Celery module with %s imported before eventlet patched\ -""" -RACE_MODS = ('billiard.', 'celery.', 'kombu.') - - -#: Warn if we couldn't patch early enough, -#: and thread/socket depending celery modules have already been loaded. -for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): - for side in ('thread', 'threading', 'socket'): # pragma: no cover - if getattr(mod, side, None): - import warnings - warnings.warn(RuntimeWarning(W_RACE % side)) - - -from celery import signals # noqa -from celery.utils import timer2 # noqa - -from . 
import base # noqa - - -def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, getpid=None): - return base.apply_target(target, args, kwargs, callback, accept_callback, - pid=getpid()) - - -class Schedule(timer2.Schedule): - - def __init__(self, *args, **kwargs): - from eventlet.greenthread import spawn_after - from greenlet import GreenletExit - super(Schedule, self).__init__(*args, **kwargs) - - self.GreenletExit = GreenletExit - self._spawn_after = spawn_after - self._queue = set() - - def _enter(self, eta, priority, entry): - secs = max(eta - time(), 0) - g = self._spawn_after(secs, entry) - self._queue.add(g) - g.link(self._entry_exit, entry) - g.entry = entry - g.eta = eta - g.priority = priority - g.canceled = False - return g - - def _entry_exit(self, g, entry): - try: - try: - g.wait() - except self.GreenletExit: - entry.cancel() - g.canceled = True - finally: - self._queue.discard(g) - - def clear(self): - queue = self._queue - while queue: - try: - queue.pop().cancel() - except (KeyError, self.GreenletExit): - pass - - @property - def queue(self): - return self._queue - - -class Timer(timer2.Timer): - Schedule = Schedule - - def ensure_started(self): - pass - - def stop(self): - self.schedule.clear() - - def cancel(self, tref): - try: - tref.cancel() - except self.schedule.GreenletExit: - pass - - def start(self): - pass - - -class TaskPool(base.BasePool): - Timer = Timer - - signal_safe = False - is_green = True - task_join_will_block = False - - def __init__(self, *args, **kwargs): - from eventlet import greenthread - from eventlet.greenpool import GreenPool - self.Pool = GreenPool - self.getcurrent = greenthread.getcurrent - self.getpid = lambda: id(greenthread.getcurrent()) - self.spawn_n = greenthread.spawn_n - - super(TaskPool, self).__init__(*args, **kwargs) - - def on_start(self): - self._pool = self.Pool(self.limit) - signals.eventlet_pool_started.send(sender=self) - self._quick_put = self._pool.spawn_n - self._quick_apply_sig = signals.eventlet_pool_apply.send - - def on_stop(self): - signals.eventlet_pool_preshutdown.send(sender=self) - if self._pool is not None: - self._pool.waitall() - signals.eventlet_pool_postshutdown.send(sender=self) - - def on_apply(self, target, args=None, kwargs=None, callback=None, - accept_callback=None, **_): - self._quick_apply_sig( - sender=self, target=target, args=args, kwargs=kwargs, - ) - self._quick_put(apply_target, target, args, kwargs, - callback, accept_callback, - self.getpid) - - def grow(self, n=1): - limit = self.limit + n - self._pool.resize(limit) - self.limit = limit - - def shrink(self, n=1): - limit = self.limit - n - self._pool.resize(limit) - self.limit = limit - - def _get_info(self): - return { - 'max-concurrency': self.limit, - 'free-threads': self._pool.free(), - 'running-threads': self._pool.running(), - } diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py deleted file mode 100644 index f567f57..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.gevent - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - gevent pool implementation. 
- -""" -from __future__ import absolute_import - -from time import time - -try: - from gevent import Timeout -except ImportError: # pragma: no cover - Timeout = None # noqa - -from celery.utils import timer2 - -from .base import apply_target, BasePool - -__all__ = ['TaskPool'] - - -def apply_timeout(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None, timeout=None, - timeout_callback=None, Timeout=Timeout, - apply_target=apply_target, **rest): - try: - with Timeout(timeout): - return apply_target(target, args, kwargs, callback, - accept_callback, pid, - propagate=(Timeout, ), **rest) - except Timeout: - return timeout_callback(False, timeout) - - -class Schedule(timer2.Schedule): - - def __init__(self, *args, **kwargs): - from gevent.greenlet import Greenlet, GreenletExit - - class _Greenlet(Greenlet): - cancel = Greenlet.kill - - self._Greenlet = _Greenlet - self._GreenletExit = GreenletExit - super(Schedule, self).__init__(*args, **kwargs) - self._queue = set() - - def _enter(self, eta, priority, entry): - secs = max(eta - time(), 0) - g = self._Greenlet.spawn_later(secs, entry) - self._queue.add(g) - g.link(self._entry_exit) - g.entry = entry - g.eta = eta - g.priority = priority - g.canceled = False - return g - - def _entry_exit(self, g): - try: - g.kill() - finally: - self._queue.discard(g) - - def clear(self): - queue = self._queue - while queue: - try: - queue.pop().kill() - except KeyError: - pass - - @property - def queue(self): - return self._queue - - -class Timer(timer2.Timer): - Schedule = Schedule - - def ensure_started(self): - pass - - def stop(self): - self.schedule.clear() - - def start(self): - pass - - -class TaskPool(BasePool): - Timer = Timer - - signal_safe = False - is_green = True - task_join_will_block = False - - def __init__(self, *args, **kwargs): - from gevent import spawn_raw - from gevent.pool import Pool - self.Pool = Pool - self.spawn_n = spawn_raw - self.timeout = kwargs.get('timeout') - super(TaskPool, self).__init__(*args, **kwargs) - - def on_start(self): - self._pool = self.Pool(self.limit) - self._quick_put = self._pool.spawn - - def on_stop(self): - if self._pool is not None: - self._pool.join() - - def on_apply(self, target, args=None, kwargs=None, callback=None, - accept_callback=None, timeout=None, - timeout_callback=None, **_): - timeout = self.timeout if timeout is None else timeout - return self._quick_put(apply_timeout if timeout else apply_target, - target, args, kwargs, callback, accept_callback, - timeout=timeout, - timeout_callback=timeout_callback) - - def grow(self, n=1): - self._pool._semaphore.counter += n - self._pool.size += n - - def shrink(self, n=1): - self._pool._semaphore.counter -= n - self._pool.size -= n - - @property - def num_processes(self): - return len(self._pool) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py deleted file mode 100644 index 1771f5c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.prefork - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Pool implementation using :mod:`multiprocessing`. 
- -""" -from __future__ import absolute_import - -import os - -from billiard import forking_enable -from billiard.pool import RUN, CLOSE, Pool as BlockingPool - -from celery import platforms -from celery import signals -from celery._state import set_default_app, _set_task_join_will_block -from celery.app import trace -from celery.concurrency.base import BasePool -from celery.five import items -from celery.utils.functional import noop -from celery.utils.log import get_logger - -from .asynpool import AsynPool - -__all__ = ['TaskPool', 'process_initializer', 'process_destructor'] - -#: List of signals to reset when a child process starts. -WORKER_SIGRESET = frozenset(['SIGTERM', - 'SIGHUP', - 'SIGTTIN', - 'SIGTTOU', - 'SIGUSR1']) - -#: List of signals to ignore when a child process starts. -WORKER_SIGIGNORE = frozenset(['SIGINT']) - -logger = get_logger(__name__) -warning, debug = logger.warning, logger.debug - - -def process_initializer(app, hostname): - """Pool child process initializer. - - This will initialize a child pool process to ensure the correct - app instance is used and things like - logging works. - - """ - _set_task_join_will_block(True) - platforms.signals.reset(*WORKER_SIGRESET) - platforms.signals.ignore(*WORKER_SIGIGNORE) - platforms.set_mp_process_title('celeryd', hostname=hostname) - # This is for Windows and other platforms not supporting - # fork(). Note that init_worker makes sure it's only - # run once per process. - app.loader.init_worker() - app.loader.init_worker_process() - logfile = os.environ.get('CELERY_LOG_FILE') or None - if logfile and '%i' in logfile.lower(): - # logfile path will differ so need to set up logging again. - app.log.already_setup = False - app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), - logfile, - bool(os.environ.get('CELERY_LOG_REDIRECT', False)), - str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), - hostname=hostname) - if os.environ.get('FORKED_BY_MULTIPROCESSING'): - # pool did execv after fork - trace.setup_worker_optimizations(app) - else: - app.set_current() - set_default_app(app) - app.finalize() - trace._tasks = app._tasks # enables fast_trace_task optimization. - # rebuild execution handler for all tasks. - from celery.app.trace import build_tracer - for name, task in items(app.tasks): - task.__trace__ = build_tracer(name, task, app.loader, hostname, - app=app) - from celery.worker import state as worker_state - worker_state.reset_state() - signals.worker_process_init.send(sender=None) - - -def process_destructor(pid, exitcode): - """Pool child process destructor - - Dispatch the :signal:`worker_process_shutdown` signal. - - """ - signals.worker_process_shutdown.send( - sender=None, pid=pid, exitcode=exitcode, - ) - - -class TaskPool(BasePool): - """Multiprocessing Pool implementation.""" - Pool = AsynPool - BlockingPool = BlockingPool - - uses_semaphore = True - write_stats = None - - def on_start(self): - """Run the task pool. - - Will pre-fork all workers so they're ready to accept tasks. 
- - """ - forking_enable(self.forking_enable) - Pool = (self.BlockingPool if self.options.get('threads', True) - else self.Pool) - P = self._pool = Pool(processes=self.limit, - initializer=process_initializer, - on_process_exit=process_destructor, - synack=False, - **self.options) - - # Create proxy methods - self.on_apply = P.apply_async - self.maintain_pool = P.maintain_pool - self.terminate_job = P.terminate_job - self.grow = P.grow - self.shrink = P.shrink - self.flush = getattr(P, 'flush', None) # FIXME add to billiard - - def restart(self): - self._pool.restart() - self._pool.apply_async(noop) - - def did_start_ok(self): - return self._pool.did_start_ok() - - def register_with_event_loop(self, loop): - try: - reg = self._pool.register_with_event_loop - except AttributeError: - return - return reg(loop) - - def on_stop(self): - """Gracefully stop the pool.""" - if self._pool is not None and self._pool._state in (RUN, CLOSE): - self._pool.close() - self._pool.join() - self._pool = None - - def on_terminate(self): - """Force terminate the pool.""" - if self._pool is not None: - self._pool.terminate() - self._pool = None - - def on_close(self): - if self._pool is not None and self._pool._state == RUN: - self._pool.close() - - def _get_info(self): - try: - write_stats = self._pool.human_write_stats - except AttributeError: - def write_stats(): - return 'N/A' # only supported by asynpool - return { - 'max-concurrency': self.limit, - 'processes': [p.pid for p in self._pool._pool], - 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', - 'put-guarded-by-semaphore': self.putlocks, - 'timeouts': (self._pool.soft_timeout or 0, - self._pool.timeout or 0), - 'writes': write_stats() - } - - @property - def num_processes(self): - return self._pool._processes diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py deleted file mode 100644 index a2dc199..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.solo - ~~~~~~~~~~~~~~~~~~~~~~~ - - Single-threaded pool implementation. - -""" -from __future__ import absolute_import - -import os - -from .base import BasePool, apply_target - -__all__ = ['TaskPool'] - - -class TaskPool(BasePool): - """Solo task pool (blocking, inline, fast).""" - - def __init__(self, *args, **kwargs): - super(TaskPool, self).__init__(*args, **kwargs) - self.on_apply = apply_target - - def _get_info(self): - return {'max-concurrency': 1, - 'processes': [os.getpid()], - 'max-tasks-per-child': None, - 'put-guarded-by-semaphore': True, - 'timeouts': ()} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py deleted file mode 100644 index fee901e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.threads - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Pool implementation using threads. 
-
-"""
-from __future__ import absolute_import
-
-from celery.five import UserDict
-
-from .base import apply_target, BasePool
-
-__all__ = ['TaskPool']
-
-
-class NullDict(UserDict):
-
-    def __setitem__(self, key, value):
-        pass
-
-
-class TaskPool(BasePool):
-
-    def __init__(self, *args, **kwargs):
-        try:
-            import threadpool
-        except ImportError:
-            raise ImportError(
-                'The threaded pool requires the threadpool module.')
-        self.WorkRequest = threadpool.WorkRequest
-        self.ThreadPool = threadpool.ThreadPool
-        super(TaskPool, self).__init__(*args, **kwargs)
-
-    def on_start(self):
-        self._pool = self.ThreadPool(self.limit)
-        # threadpool stores all work requests until they are processed
-        # we don't need this dict, and it occupies way too much memory.
-        self._pool.workRequests = NullDict()
-        self._quick_put = self._pool.putRequest
-        self._quick_clear = self._pool._results_queue.queue.clear
-
-    def on_stop(self):
-        self._pool.dismissWorkers(self.limit, do_join=True)
-
-    def on_apply(self, target, args=None, kwargs=None, callback=None,
-                 accept_callback=None, **_):
-        req = self.WorkRequest(apply_target, (target, args, kwargs, callback,
-                                              accept_callback))
-        self._quick_put(req)
-        # threadpool also has callback support,
-        # but for some reason the callback is not triggered
-        # before you've collected the results.
-        # Clear the results (if any), so it doesn't grow too large.
-        self._quick_clear()
-        return req
diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py
deleted file mode 100644
index dcdc615..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-=========================
-Abortable tasks overview
-=========================
-
-For long-running :class:`Task`'s, it can be desirable to support
-aborting during execution. Of course, these tasks should be built to
-support abortion specifically.
-
-The :class:`AbortableTask` serves as a base class for all :class:`Task`
-objects that should support abortion by producers.
-
-* Producers may invoke the :meth:`abort` method on
-  :class:`AbortableAsyncResult` instances, to request abortion.
-
-* Consumers (workers) should periodically check (and honor!) the
-  :meth:`is_aborted` method at controlled points in their task's
-  :meth:`run` method. The more often, the better.
-
-The necessary intermediate communication is dealt with by the
-:class:`AbortableTask` implementation.
-
-Usage example
--------------
-
-In the consumer:
-
-.. code-block:: python
-
-    from __future__ import absolute_import
-
-    from celery.contrib.abortable import AbortableTask
-    from celery.utils.log import get_task_logger
-
-    from proj.celery import app
-
-    logger = get_task_logger(__name__)
-
-    @app.task(bind=True, base=AbortableTask)
-    def long_running_task(self):
-        results = []
-        for i in range(100):
-            # check after every 5 iterations...
-            # (or alternatively, check when some timer is due)
-            if not i % 5:
-                if self.is_aborted():
-                    # respect aborted state, and terminate gracefully.
-                    logger.warning('Task aborted')
-                    return
-            value = do_something_expensive(i)
-            results.append(value)
-        logger.info('Task complete')
-        return results
-
-In the producer:
-
-.. code-block:: python
-
-    from __future__ import absolute_import
-
-    import time
-
-    from proj.tasks import long_running_task
-
-    def myview(request):
-        # result is of type AbortableAsyncResult
-        result = long_running_task.delay()
-
-        # abort the task after 10 seconds
-        time.sleep(10)
-        result.abort()
-
-After the `result.abort()` call, the task execution is not
-aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking `result.state` status, or call `result.get(timeout=)` to
-have it block until the task is finished.
-
-.. note::
-
-    In order to abort tasks, there needs to be communication between the
-    producer and the consumer. This is currently implemented through the
-    database backend. Therefore, this class will only work with the
-    database backends.
-
-"""
-from __future__ import absolute_import
-
-from celery import Task
-from celery.result import AsyncResult
-
-__all__ = ['AbortableAsyncResult', 'AbortableTask']
-
-
-"""
-Task States
------------
-
-.. state:: ABORTED
-
-ABORTED
-~~~~~~~
-
-Task is aborted (typically by the producer) and should be
-aborted as soon as possible.
-
-"""
-ABORTED = 'ABORTED'
-
-
-class AbortableAsyncResult(AsyncResult):
-    """Represents an abortable result.
-
-    Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
-    which sets the state of the underlying Task to `'ABORTED'`.
-
-    """
-
-    def is_aborted(self):
-        """Return :const:`True` if the task is (being) aborted."""
-        return self.state == ABORTED
-
-    def abort(self):
-        """Set the state of the task to :const:`ABORTED`.
-
-        Abortable tasks monitor their state at regular intervals and
-        terminate execution if aborted.
-
-        Be aware that invoking this method does not guarantee when the
-        task will be aborted (or even if the task will be aborted at
-        all).
-
-        """
-        # TODO: store_result requires all four arguments to be set,
-        # but only status should be updated here
-        return self.backend.store_result(self.id, result=None,
-                                         status=ABORTED, traceback=None)
-
-
-class AbortableTask(Task):
-    """A celery task that serves as a base class for all :class:`Task`'s
-    that support aborting during execution.
-
-    All subclasses of :class:`AbortableTask` must call the
-    :meth:`is_aborted` method periodically and act accordingly when
-    the call evaluates to :const:`True`.
-
-    """
-    abstract = True
-
-    def AsyncResult(self, task_id):
-        """Return the accompanying AbortableAsyncResult instance."""
-        return AbortableAsyncResult(task_id, backend=self.backend)
-
-    def is_aborted(self, **kwargs):
-        """Checks against the backend whether this
-        :class:`AbortableAsyncResult` is :const:`ABORTED`.
-
-        Always return :const:`False` in case the `task_id` parameter
-        refers to a regular (non-abortable) :class:`Task`.
-
-        Be aware that invoking this method will cause a hit in the
-        backend (for example a database query), so find a good balance
-        between calling it regularly (for responsiveness), but not too
-        often (for performance).
-
-        """
-        task_id = kwargs.get('task_id', self.request.id)
-        result = self.AsyncResult(task_id)
-        if not isinstance(result, AbortableAsyncResult):
-            return False
-        return result.is_aborted()
diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py
deleted file mode 100644
index 30f0a20..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-celery.contrib.batches
-======================
-
-Experimental task class that buffers messages and processes them as a list.
-
-.. warning::
-
-    For this to work you have to set
-    :setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where
-    the final multiplied value is higher than ``flush_every``.
-
-    In the future we hope to add the ability to direct batching tasks
-    to a channel with different QoS requirements than the task channel.
-
-**Simple Example**
-
-A click counter that flushes the buffer every 100 messages, and every
-10 seconds. Does not do anything with the data, but can easily be modified
-to store it in a database.
-
-.. code-block:: python
-
-    # Flush after 100 messages, or 10 seconds.
-    @app.task(base=Batches, flush_every=100, flush_interval=10)
-    def count_click(requests):
-        from collections import Counter
-        count = Counter(request.kwargs['url'] for request in requests)
-        for url, count in count.items():
-            print('>>> Clicks: {0} -> {1}'.format(url, count))
-
-
-Then you can ask for a click to be counted by doing::
-
-    >>> count_click.delay(url='http://example.com')
-
-**Example returning results**
-
-An interface to the Web of Trust API that flushes the buffer every 100
-messages, and every 10 seconds.
-
-.. code-block:: python
-
-    import requests
-    from urlparse import urlparse
-
-    from celery.contrib.batches import Batches
-
-    wot_api_target = 'https://api.mywot.com/0.4/public_link_json'
-
-    @app.task(base=Batches, flush_every=100, flush_interval=10)
-    def wot_api(requests):
-        sig = lambda url: url
-        responses = wot_api_real(
-            (sig(*request.args, **request.kwargs) for request in requests)
-        )
-        # use mark_as_done to manually return response data
-        for response, request in zip(responses, requests):
-            app.backend.mark_as_done(request.id, response)
-
-
-    def wot_api_real(urls):
-        domains = [urlparse(url).netloc for url in urls]
-        response = requests.get(
-            wot_api_target,
-            params={'hosts': ('/').join(set(domains)) + '/'}
-        )
-        return [response.json()[domain] for domain in domains]
-
-Using the API is done as follows::
-
-    >>> wot_api.delay('http://example.com')
-
-.. note::
-
-    If you don't have an ``app`` instance then use the current app proxy
-    instead::
-
-        from celery import current_app
-        current_app.backend.mark_as_done(request.id, response)
-
-"""
-from __future__ import absolute_import
-
-from itertools import count
-
-from celery.task import Task
-from celery.five import Empty, Queue
-from celery.utils.log import get_logger
-from celery.worker.job import Request
-from celery.utils import noop
-
-__all__ = ['Batches']
-
-logger = get_logger(__name__)
-
-
-def consume_queue(queue):
-    """Iterator yielding all immediately available items in a
-    :class:`Queue.Queue`.
-
-    The iterator stops as soon as the queue raises :exc:`Queue.Empty`.
-
-    *Examples*
-
-        >>> q = Queue()
-        >>> map(q.put, range(4))
-        >>> list(consume_queue(q))
-        [0, 1, 2, 3]
-        >>> list(consume_queue(q))
-        []
-
-    """
-    get = queue.get_nowait
-    while 1:
-        try:
-            yield get()
-        except Empty:
-            break
-
-
-def apply_batches_task(task, args, loglevel, logfile):
-    task.push_request(loglevel=loglevel, logfile=logfile)
-    try:
-        result = task(*args)
-    except Exception as exc:
-        result = None
-        logger.error('Error: %r', exc, exc_info=True)
-    finally:
-        task.pop_request()
-    return result
-
-
-class SimpleRequest(object):
-    """Pickleable request."""
-
-    #: task id
-    id = None
-
-    #: task name
-    name = None
-
-    #: positional arguments
-    args = ()
-
-    #: keyword arguments
-    kwargs = {}
-
-    #: message delivery information.
-    delivery_info = None
-
-    #: worker node name
-    hostname = None
-
-    def __init__(self, id, name, args, kwargs, delivery_info, hostname):
-        self.id = id
-        self.name = name
-        self.args = args
-        self.kwargs = kwargs
-        self.delivery_info = delivery_info
-        self.hostname = hostname
-
-    @classmethod
-    def from_request(cls, request):
-        return cls(request.id, request.name, request.args,
-                   request.kwargs, request.delivery_info, request.hostname)
-
-
-class Batches(Task):
-    abstract = True
-
-    #: Maximum number of messages in the buffer.
-    flush_every = 10
-
-    #: Timeout in seconds before buffer is flushed anyway.
-    flush_interval = 30
-
-    def __init__(self):
-        self._buffer = Queue()
-        self._count = count(1)
-        self._tref = None
-        self._pool = None
-
-    def run(self, requests):
-        raise NotImplementedError('must implement run(requests)')
-
-    def Strategy(self, task, app, consumer):
-        self._pool = consumer.pool
-        hostname = consumer.hostname
-        eventer = consumer.event_dispatcher
-        Req = Request
-        connection_errors = consumer.connection_errors
-        timer = consumer.timer
-        put_buffer = self._buffer.put
-        flush_buffer = self._do_flush
-
-        def task_message_handler(message, body, ack, reject, callbacks, **kw):
-            request = Req(body, on_ack=ack, app=app, hostname=hostname,
-                          events=eventer, task=task,
-                          connection_errors=connection_errors,
-                          delivery_info=message.delivery_info)
-            put_buffer(request)
-
-            if self._tref is None:     # first request starts flush timer.
-                self._tref = timer.call_repeatedly(
-                    self.flush_interval, flush_buffer,
-                )
-
-            if not next(self._count) % self.flush_every:
-                flush_buffer()
-
-        return task_message_handler
-
-    def flush(self, requests):
-        return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
-                                             for r in requests], ))
-
-    def _do_flush(self):
-        logger.debug('Batches: Wake-up to flush buffer...')
-        requests = None
-        if self._buffer.qsize():
-            requests = list(consume_queue(self._buffer))
-            if requests:
-                logger.debug('Batches: Buffer complete: %s', len(requests))
-                self.flush(requests)
-        if not requests:
-            logger.debug('Batches: Canceling timer: Nothing in buffer.')
-            if self._tref:
-                self._tref.cancel()  # cancel timer.
-                self._tref = None
-
-    def apply_buffer(self, requests, args=(), kwargs={}):
-        acks_late = [], []
-        [acks_late[r.task.acks_late].append(r) for r in requests]
-        assert requests and (acks_late[True] or acks_late[False])
-
-        def on_accepted(pid, time_accepted):
-            [req.acknowledge() for req in acks_late[False]]
-
-        def on_return(result):
-            [req.acknowledge() for req in acks_late[True]]
-
-        return self._pool.apply_async(
-            apply_batches_task,
-            (self, args, 0, None),
-            accept_callback=on_accepted,
-            callback=acks_late[True] and on_return or noop,
-        )
diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py
deleted file mode 100644
index 56aa7f4..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-celery.contrib.methods
-======================
-
-Task decorator that supports creating tasks out of methods.
-
-Examples
---------
-
-.. code-block:: python
-
-    from celery.contrib.methods import task
-
-    class X(object):
-
-        @task()
-        def add(self, x, y):
-            return x + y
-
-or with any task decorator:
-
-.. code-block:: python
-
-    from celery.contrib.methods import task_method
-
-    class X(object):
-
-        @app.task(filter=task_method)
-        def add(self, x, y):
-            return x + y
-
-.. note::
-
-    The task must use the new Task base class (:class:`celery.Task`),
-    not the old base class that uses classmethods (``celery.task.Task``,
-    ``celery.task.base.Task``).
-
-    This means that you have to use the task decorator from a Celery app
-    instance, and not the old-API:
-
-    .. code-block:: python
-
-        from celery import task       # BAD
-        from celery.task import task  # ALSO BAD
-
-        # GOOD:
-        app = Celery(...)
-
-        @app.task(filter=task_method)
-        def foo(self): pass
-
-        # ALSO GOOD:
-        from celery import current_app
-
-        @current_app.task(filter=task_method)
-        def foo(self): pass
-
-        # ALSO GOOD:
-        from celery import shared_task
-
-        @shared_task(filter=task_method)
-        def foo(self): pass
-
-Caveats
--------
-
-- Automatic naming won't be able to know what the class name is.
-
-  The name will still be module_name + task_name,
-  so two methods with the same name in the same module will collide
-  so that only one task can run:
-
-  .. code-block:: python
-
-      class A(object):
-
-          @task()
-          def add(self, x, y):
-              return x + y
-
-      class B(object):
-
-          @task()
-          def add(self, x, y):
-              return x + y
-
-  would have to be written as:
-
-  .. code-block:: python
-
-      class A(object):
-          @task(name='A.add')
-          def add(self, x, y):
-              return x + y
-
-      class B(object):
-          @task(name='B.add')
-          def add(self, x, y):
-              return x + y
-
-"""
-
-from __future__ import absolute_import
-
-from celery import current_app
-
-__all__ = ['task_method', 'task']
-
-
-class task_method(object):
-
-    def __init__(self, task, *args, **kwargs):
-        self.task = task
-
-    def __get__(self, obj, type=None):
-        if obj is None:
-            return self.task
-        task = self.task.__class__()
-        task.__self__ = obj
-        return task
-
-
-def task(*args, **kwargs):
-    return current_app.task(*args, **dict(kwargs, filter=task_method))
diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py
deleted file mode 100644
index e4a10e9..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.contrib.migrate
-    ~~~~~~~~~~~~~~~~~~~~~~
-
-    Migration tools.
- -""" -from __future__ import absolute_import, print_function, unicode_literals - -import socket - -from functools import partial -from itertools import cycle, islice - -from kombu import eventloop, Queue -from kombu.common import maybe_declare -from kombu.utils.encoding import ensure_bytes - -from celery.app import app_or_default -from celery.five import string, string_t -from celery.utils import worker_direct - -__all__ = ['StopFiltering', 'State', 'republish', 'migrate_task', - 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', - 'start_filter', 'move_task_by_id', 'move_by_idmap', - 'move_by_taskmap', 'move_direct', 'move_direct_by_id'] - -MOVING_PROGRESS_FMT = """\ -Moving task {state.filtered}/{state.strtotal}: \ -{body[task]}[{body[id]}]\ -""" - - -class StopFiltering(Exception): - pass - - -class State(object): - count = 0 - filtered = 0 - total_apx = 0 - - @property - def strtotal(self): - if not self.total_apx: - return '?' - return string(self.total_apx) - - def __repr__(self): - if self.filtered: - return '^{0.filtered}'.format(self) - return '{0.count}/{0.strtotal}'.format(self) - - -def republish(producer, message, exchange=None, routing_key=None, - remove_props=['application_headers', - 'content_type', - 'content_encoding', - 'headers']): - body = ensure_bytes(message.body) # use raw message body. - info, headers, props = (message.delivery_info, - message.headers, message.properties) - exchange = info['exchange'] if exchange is None else exchange - routing_key = info['routing_key'] if routing_key is None else routing_key - ctype, enc = message.content_type, message.content_encoding - # remove compression header, as this will be inserted again - # when the message is recompressed. - compression = headers.pop('compression', None) - - for key in remove_props: - props.pop(key, None) - - producer.publish(ensure_bytes(body), exchange=exchange, - routing_key=routing_key, compression=compression, - headers=headers, content_type=ctype, - content_encoding=enc, **props) - - -def migrate_task(producer, body_, message, queues=None): - info = message.delivery_info - queues = {} if queues is None else queues - republish(producer, message, - exchange=queues.get(info['exchange']), - routing_key=queues.get(info['routing_key'])) - - -def filter_callback(callback, tasks): - - def filtered(body, message): - if tasks and body['task'] not in tasks: - return - - return callback(body, message) - return filtered - - -def migrate_tasks(source, dest, migrate=migrate_task, app=None, - queues=None, **kwargs): - app = app_or_default(app) - queues = prepare_queues(queues) - producer = app.amqp.TaskProducer(dest) - migrate = partial(migrate, producer, queues=queues) - - def on_declare_queue(queue): - new_queue = queue(producer.channel) - new_queue.name = queues.get(queue.name, queue.name) - if new_queue.routing_key == queue.name: - new_queue.routing_key = queues.get(queue.name, - new_queue.routing_key) - if new_queue.exchange.name == queue.name: - new_queue.exchange.name = queues.get(queue.name, queue.name) - new_queue.declare() - - return start_filter(app, source, migrate, queues=queues, - on_declare_queue=on_declare_queue, **kwargs) - - -def _maybe_queue(app, q): - if isinstance(q, string_t): - return app.amqp.queues[q] - return q - - -def move(predicate, connection=None, exchange=None, routing_key=None, - source=None, app=None, callback=None, limit=None, transform=None, - **kwargs): - """Find tasks by filtering them and move the tasks to a new queue. 
- - :param predicate: Filter function used to decide which messages - to move. Must accept the standard signature of ``(body, message)`` - used by Kombu consumer callbacks. If the predicate wants the message - to be moved it must return either: - - 1) a tuple of ``(exchange, routing_key)``, or - - 2) a :class:`~kombu.entity.Queue` instance, or - - 3) any other true value which means the specified - ``exchange`` and ``routing_key`` arguments will be used. - - :keyword connection: Custom connection to use. - :keyword source: Optional list of source queues to use instead of the - default (which is the queues in :setting:`CELERY_QUEUES`). - This list can also contain new :class:`~kombu.entity.Queue` instances. - :keyword exchange: Default destination exchange. - :keyword routing_key: Default destination routing key. - :keyword limit: Limit number of messages to filter. - :keyword callback: Callback called after message moved, - with signature ``(state, body, message)``. - :keyword transform: Optional function to transform the return - value (destination) of the filter function. - - Also supports the same keyword arguments as :func:`start_filter`. - - To demonstrate, the :func:`move_task_by_id` operation can be implemented - like this: - - .. code-block:: python - - def is_wanted_task(body, message): - if body['id'] == wanted_id: - return Queue('foo', exchange=Exchange('foo'), - routing_key='foo') - - move(is_wanted_task) - - or with a transform: - - .. code-block:: python - - def transform(value): - if isinstance(value, string_t): - return Queue(value, Exchange(value), value) - return value - - move(is_wanted_task, transform=transform) - - The predicate may also return a tuple of ``(exchange, routing_key)`` - to specify the destination to where the task should be moved, - or a :class:`~kombu.entitiy.Queue` instance. - Any other true value means that the task will be moved to the - default exchange/routing_key. 
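    A third sketch, using the tuple form (the task name and queue names
    here are hypothetical): returning ``(exchange, routing_key)``
    directly routes the message without declaring a new queue:

    .. code-block:: python

        def is_wanted_task(body, message):
            if body['task'] == 'tasks.add':
                return 'default', 'hipri'

        move(is_wanted_task)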
- - """ - app = app_or_default(app) - queues = [_maybe_queue(app, queue) for queue in source or []] or None - with app.connection_or_acquire(connection, pool=False) as conn: - producer = app.amqp.TaskProducer(conn) - state = State() - - def on_task(body, message): - ret = predicate(body, message) - if ret: - if transform: - ret = transform(ret) - if isinstance(ret, Queue): - maybe_declare(ret, conn.default_channel) - ex, rk = ret.exchange.name, ret.routing_key - else: - ex, rk = expand_dest(ret, exchange, routing_key) - republish(producer, message, - exchange=ex, routing_key=rk) - message.ack() - - state.filtered += 1 - if callback: - callback(state, body, message) - if limit and state.filtered >= limit: - raise StopFiltering() - - return start_filter(app, conn, on_task, consume_from=queues, **kwargs) - - -def expand_dest(ret, exchange, routing_key): - try: - ex, rk = ret - except (TypeError, ValueError): - ex, rk = exchange, routing_key - return ex, rk - - -def task_id_eq(task_id, body, message): - return body['id'] == task_id - - -def task_id_in(ids, body, message): - return body['id'] in ids - - -def prepare_queues(queues): - if isinstance(queues, string_t): - queues = queues.split(',') - if isinstance(queues, list): - queues = dict(tuple(islice(cycle(q.split(':')), None, 2)) - for q in queues) - if queues is None: - queues = {} - return queues - - -def start_filter(app, conn, filter, limit=None, timeout=1.0, - ack_messages=False, tasks=None, queues=None, - callback=None, forever=False, on_declare_queue=None, - consume_from=None, state=None, accept=None, **kwargs): - state = state or State() - queues = prepare_queues(queues) - consume_from = [_maybe_queue(app, q) - for q in consume_from or list(queues)] - if isinstance(tasks, string_t): - tasks = set(tasks.split(',')) - if tasks is None: - tasks = set([]) - - def update_state(body, message): - state.count += 1 - if limit and state.count >= limit: - raise StopFiltering() - - def ack_message(body, message): - message.ack() - - consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept) - - if tasks: - filter = filter_callback(filter, tasks) - update_state = filter_callback(update_state, tasks) - ack_message = filter_callback(ack_message, tasks) - - consumer.register_callback(filter) - consumer.register_callback(update_state) - if ack_messages: - consumer.register_callback(ack_message) - if callback is not None: - callback = partial(callback, state) - if tasks: - callback = filter_callback(callback, tasks) - consumer.register_callback(callback) - - # declare all queues on the new broker. - for queue in consumer.queues: - if queues and queue.name not in queues: - continue - if on_declare_queue is not None: - on_declare_queue(queue) - try: - _, mcount, _ = queue(consumer.channel).queue_declare(passive=True) - if mcount: - state.total_apx += mcount - except conn.channel_errors: - pass - - # start migrating messages. - with consumer: - try: - for _ in eventloop(conn, # pragma: no cover - timeout=timeout, ignore_timeouts=forever): - pass - except socket.timeout: - pass - except StopFiltering: - pass - return state - - -def move_task_by_id(task_id, dest, **kwargs): - """Find a task by id and move it to another queue. - - :param task_id: Id of task to move. - :param dest: Destination queue. - - Also supports the same keyword arguments as :func:`move`. 
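    A minimal sketch, mirroring the :func:`move_by_idmap` example below
    (the id and queue name are hypothetical)::

        >>> move_task_by_id('5bee6e82-f4ac-468e-bd3d-13e8600250bc',
        ...                 Queue('hipri'))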
- - """ - return move_by_idmap({task_id: dest}, **kwargs) - - -def move_by_idmap(map, **kwargs): - """Moves tasks by matching from a ``task_id: queue`` mapping, - where ``queue`` is a queue to move the task to. - - Example:: - - >>> move_by_idmap({ - ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), - ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), - ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, - ... queues=['hipri']) - - """ - def task_id_in_map(body, message): - return map.get(body['id']) - - # adding the limit means that we don't have to consume any more - # when we've found everything. - return move(task_id_in_map, limit=len(map), **kwargs) - - -def move_by_taskmap(map, **kwargs): - """Moves tasks by matching from a ``task_name: queue`` mapping, - where ``queue`` is the queue to move the task to. - - Example:: - - >>> move_by_taskmap({ - ... 'tasks.add': Queue('name'), - ... 'tasks.mul': Queue('name'), - ... }) - - """ - - def task_name_in_map(body, message): - return map.get(body['task']) # <- name of task - - return move(task_name_in_map, **kwargs) - - -def filter_status(state, body, message, **kwargs): - print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) - - -move_direct = partial(move, transform=worker_direct) -move_direct_by_id = partial(move_task_by_id, transform=worker_direct) -move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) -move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py deleted file mode 100644 index 3f218ae..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.rdb -================== - -Remote debugger for Celery tasks running in multiprocessing pool workers. -Inspired by http://snippets.dzone.com/posts/show/7248 - -**Usage** - -.. code-block:: python - - from celery.contrib import rdb - from celery import task - - @task() - def add(x, y): - result = x + y - rdb.set_trace() - return result - - -**Environment Variables** - -.. envvar:: CELERY_RDB_HOST - - Hostname to bind to. Default is '127.0.01', which means the socket - will only be accessible from the local host. - -.. envvar:: CELERY_RDB_PORT - - Base port to bind to. Default is 6899. - The debugger will try to find an available port starting from the - base port. The selected port will be logged by the worker. - -""" -from __future__ import absolute_import, print_function - -import errno -import os -import socket -import sys - -from pdb import Pdb - -from billiard import current_process - -from celery.five import range - -__all__ = ['CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'default_port', - 'Rdb', 'debugger', 'set_trace'] - -default_port = 6899 - -CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' -CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port) - -#: Holds the currently active debugger. -_current = [None] - -_frame = getattr(sys, '_getframe') - -NO_AVAILABLE_PORT = """\ -{self.ident}: Couldn't find an available port. - -Please specify one using the CELERY_RDB_PORT environment variable. -""" - -BANNER = """\ -{self.ident}: Please telnet into {self.host} {self.port}. - -Type `exit` in session to continue. - -{self.ident}: Waiting for client... -""" - -SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' 
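# A typical session under the defaults above, sketched (the port is
# whatever the worker actually logged when it hit set_trace()):
#
#   $ telnet 127.0.0.1 6899
#
# Typing `exit` in the telnet session resumes the task, as the banner
# above says.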
-SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' - - -class Rdb(Pdb): - me = 'Remote Debugger' - _prev_outs = None - _sock = None - - def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT, - port_search_limit=100, port_skew=+0, out=sys.stdout): - self.active = True - self.out = out - - self._prev_handles = sys.stdin, sys.stdout - - self._sock, this_port = self.get_avail_port( - host, port, port_search_limit, port_skew, - ) - self._sock.setblocking(1) - self._sock.listen(1) - self.ident = '{0}:{1}'.format(self.me, this_port) - self.host = host - self.port = this_port - self.say(BANNER.format(self=self)) - - self._client, address = self._sock.accept() - self._client.setblocking(1) - self.remote_addr = ':'.join(str(v) for v in address) - self.say(SESSION_STARTED.format(self=self)) - self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') - Pdb.__init__(self, completekey='tab', - stdin=self._handle, stdout=self._handle) - - def get_avail_port(self, host, port, search_limit=100, skew=+0): - try: - _, skew = current_process().name.split('-') - skew = int(skew) - except ValueError: - pass - this_port = None - for i in range(search_limit): - _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - this_port = port + skew + i - try: - _sock.bind((host, this_port)) - except socket.error as exc: - if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: - continue - raise - else: - return _sock, this_port - else: - raise Exception(NO_AVAILABLE_PORT.format(self=self)) - - def say(self, m): - print(m, file=self.out) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self._close_session() - - def _close_session(self): - self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles - if self.active: - if self._handle is not None: - self._handle.close() - if self._client is not None: - self._client.close() - if self._sock is not None: - self._sock.close() - self.active = False - self.say(SESSION_ENDED.format(self=self)) - - def do_continue(self, arg): - self._close_session() - self.set_continue() - return 1 - do_c = do_cont = do_continue - - def do_quit(self, arg): - self._close_session() - self.set_quit() - return 1 - do_q = do_exit = do_quit - - def set_quit(self): - # this raises a BdbQuit exception that we are unable to catch. - sys.settrace(None) - - -def debugger(): - """Return the current debugger instance (if any), - or creates a new one.""" - rdb = _current[0] - if rdb is None or not rdb.active: - rdb = _current[0] = Rdb() - return rdb - - -def set_trace(frame=None): - """Set breakpoint at current location, or a specified frame""" - if frame is None: - frame = _frame().f_back - return debugger().set_trace(frame) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py deleted file mode 100644 index 2e57431..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.sphinx -===================== - -Sphinx documentation plugin - -**Usage** - -Add the extension to your :file:`docs/conf.py` configuration module: - -.. code-block:: python - - extensions = (..., - 'celery.contrib.sphinx') - -If you would like to change the prefix for tasks in reference documentation -then you can change the ``celery_task_prefix`` configuration value: - -.. 
code-block:: python - - celery_task_prefix = '(task)' # < default - - -With the extension installed `autodoc` will automatically find -task decorated objects and generate the correct (as well as -add a ``(task)`` prefix), and you can also refer to the tasks -using `:task:proj.tasks.add` syntax. - -Use ``.. autotask::`` to manually document a task. - -""" -from __future__ import absolute_import - -try: - from inspect import formatargspec, getfullargspec as getargspec -except ImportError: # Py2 - from inspect import formatargspec, getargspec # noqa - -from sphinx.domains.python import PyModulelevel -from sphinx.ext.autodoc import FunctionDocumenter - -from celery.app.task import BaseTask - - -class TaskDocumenter(FunctionDocumenter): - objtype = 'task' - member_order = 11 - - @classmethod - def can_document_member(cls, member, membername, isattr, parent): - return isinstance(member, BaseTask) and getattr(member, '__wrapped__') - - def format_args(self): - wrapped = getattr(self.object, '__wrapped__') - if wrapped is not None: - argspec = getargspec(wrapped) - fmt = formatargspec(*argspec) - fmt = fmt.replace('\\', '\\\\') - return fmt - return '' - - def document_members(self, all_members=False): - pass - - -class TaskDirective(PyModulelevel): - - def get_signature_prefix(self, sig): - return self.env.config.celery_task_prefix - - -def setup(app): - app.add_autodocumenter(TaskDocumenter) - app.domains['py'].directives['task'] = TaskDirective - app.add_config_value('celery_task_prefix', '(task)', True) diff --git a/thesisenv/lib/python3.6/site-packages/celery/datastructures.py b/thesisenv/lib/python3.6/site-packages/celery/datastructures.py deleted file mode 100644 index 32a1d54..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/datastructures.py +++ /dev/null @@ -1,671 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.datastructures - ~~~~~~~~~~~~~~~~~~~~~ - - Custom types and data structures. 
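    For a taste of what lives here, :class:`AttributeDict` is a plain
    dict with attribute access (the values below are illustrative only):

    .. code-block:: python

        from celery.datastructures import AttributeDict

        d = AttributeDict({'foo': 1})
        d.foo       # -> 1, same as d['foo']
        d.bar = 2   # same as d['bar'] = 2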
- -""" -from __future__ import absolute_import, print_function, unicode_literals - -import sys -import time - -from collections import defaultdict, Mapping, MutableMapping, MutableSet -from heapq import heapify, heappush, heappop -from functools import partial -from itertools import chain - -from billiard.einfo import ExceptionInfo # noqa -from kombu.utils.encoding import safe_str -from kombu.utils.limits import TokenBucket # noqa - -from celery.five import items -from celery.utils.functional import LRUCache, first, uniq # noqa - -try: - from django.utils.functional import LazyObject, LazySettings -except ImportError: - class LazyObject(object): # noqa - pass - LazySettings = LazyObject # noqa - -DOT_HEAD = """ -{IN}{type} {id} {{ -{INp}graph [{attrs}] -""" -DOT_ATTR = '{name}={value}' -DOT_NODE = '{INp}"{0}" [{attrs}]' -DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' -DOT_ATTRSEP = ', ' -DOT_DIRS = {'graph': '--', 'digraph': '->'} -DOT_TAIL = '{IN}}}' - -__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph', - 'AttributeDictMixin', 'AttributeDict', 'DictAttribute', - 'ConfigurationView', 'LimitedSet'] - - -def force_mapping(m): - if isinstance(m, (LazyObject, LazySettings)): - m = m._wrapped - return DictAttribute(m) if not isinstance(m, Mapping) else m - - -class GraphFormatter(object): - _attr = DOT_ATTR.strip() - _node = DOT_NODE.strip() - _edge = DOT_EDGE.strip() - _head = DOT_HEAD.strip() - _tail = DOT_TAIL.strip() - _attrsep = DOT_ATTRSEP - _dirs = dict(DOT_DIRS) - - scheme = { - 'shape': 'box', - 'arrowhead': 'vee', - 'style': 'filled', - 'fontname': 'HelveticaNeue', - } - edge_scheme = { - 'color': 'darkseagreen4', - 'arrowcolor': 'black', - 'arrowsize': 0.7, - } - node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} - term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} - graph_scheme = {'bgcolor': 'mintcream'} - - def __init__(self, root=None, type=None, id=None, - indent=0, inw=' ' * 4, **scheme): - self.id = id or 'dependencies' - self.root = root - self.type = type or 'digraph' - self.direction = self._dirs[self.type] - self.IN = inw * (indent or 0) - self.INp = self.IN + inw - self.scheme = dict(self.scheme, **scheme) - self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) - - def attr(self, name, value): - value = '"{0}"'.format(value) - return self.FMT(self._attr, name=name, value=value) - - def attrs(self, d, scheme=None): - d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) - return self._attrsep.join( - safe_str(self.attr(k, v)) for k, v in items(d) - ) - - def head(self, **attrs): - return self.FMT( - self._head, id=self.id, type=self.type, - attrs=self.attrs(attrs, self.graph_scheme), - ) - - def tail(self): - return self.FMT(self._tail) - - def label(self, obj): - return obj - - def node(self, obj, **attrs): - return self.draw_node(obj, self.node_scheme, attrs) - - def terminal_node(self, obj, **attrs): - return self.draw_node(obj, self.term_scheme, attrs) - - def edge(self, a, b, **attrs): - return self.draw_edge(a, b, **attrs) - - def _enc(self, s): - return s.encode('utf-8', 'ignore') - - def FMT(self, fmt, *args, **kwargs): - return self._enc(fmt.format( - *args, **dict(kwargs, IN=self.IN, INp=self.INp) - )) - - def draw_edge(self, a, b, scheme=None, attrs=None): - return self.FMT( - self._edge, self.label(a), self.label(b), - dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), - ) - - def draw_node(self, obj, scheme=None, attrs=None): - return self.FMT( - self._node, self.label(obj), 
attrs=self.attrs(attrs, scheme), - ) - - -class CycleError(Exception): - """A cycle was detected in an acyclic graph.""" - - -class DependencyGraph(object): - """A directed acyclic graph of objects and their dependencies. - - Supports a robust topological sort - to detect the order in which they must be handled. - - Takes an optional iterator of ``(obj, dependencies)`` - tuples to build the graph from. - - .. warning:: - - Does not support cycle detection. - - """ - - def __init__(self, it=None, formatter=None): - self.formatter = formatter or GraphFormatter() - self.adjacent = {} - if it is not None: - self.update(it) - - def add_arc(self, obj): - """Add an object to the graph.""" - self.adjacent.setdefault(obj, []) - - def add_edge(self, A, B): - """Add an edge from object ``A`` to object ``B`` - (``A`` depends on ``B``).""" - self[A].append(B) - - def connect(self, graph): - """Add nodes from another graph.""" - self.adjacent.update(graph.adjacent) - - def topsort(self): - """Sort the graph topologically. - - :returns: a list of objects in the order - in which they must be handled. - - """ - graph = DependencyGraph() - components = self._tarjan72() - - NC = dict((node, component) - for component in components - for node in component) - for component in components: - graph.add_arc(component) - for node in self: - node_c = NC[node] - for successor in self[node]: - successor_c = NC[successor] - if node_c != successor_c: - graph.add_edge(node_c, successor_c) - return [t[0] for t in graph._khan62()] - - def valency_of(self, obj): - """Return the valency (degree) of a vertex in the graph.""" - try: - l = [len(self[obj])] - except KeyError: - return 0 - for node in self[obj]: - l.append(self.valency_of(node)) - return sum(l) - - def update(self, it): - """Update the graph with data from a list - of ``(obj, dependencies)`` tuples.""" - tups = list(it) - for obj, _ in tups: - self.add_arc(obj) - for obj, deps in tups: - for dep in deps: - self.add_edge(obj, dep) - - def edges(self): - """Return generator that yields for all edges in the graph.""" - return (obj for obj, adj in items(self) if adj) - - def _khan62(self): - """Khans simple topological sort algorithm from '62 - - See http://en.wikipedia.org/wiki/Topological_sorting - - """ - count = defaultdict(lambda: 0) - result = [] - - for node in self: - for successor in self[node]: - count[successor] += 1 - ready = [node for node in self if not count[node]] - - while ready: - node = ready.pop() - result.append(node) - - for successor in self[node]: - count[successor] -= 1 - if count[successor] == 0: - ready.append(successor) - result.reverse() - return result - - def _tarjan72(self): - """Tarjan's algorithm to find strongly connected components. - - See http://bit.ly/vIMv3h. - - """ - result, stack, low = [], [], {} - - def visit(node): - if node in low: - return - num = len(low) - low[node] = num - stack_pos = len(stack) - stack.append(node) - - for successor in self[node]: - visit(successor) - low[node] = min(low[node], low[successor]) - - if num == low[node]: - component = tuple(stack[stack_pos:]) - stack[stack_pos:] = [] - result.append(component) - for item in component: - low[item] = len(self) - - for node in self: - visit(node) - - return result - - def to_dot(self, fh, formatter=None): - """Convert the graph to DOT format. - - :param fh: A file, or a file-like object to write the graph to. 
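        A short sketch (the file name is arbitrary)::

            >>> with open('graph.dot', 'w') as fh:
            ...     graph.to_dot(fh)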
- - """ - seen = set() - draw = formatter or self.formatter - P = partial(print, file=fh) - - def if_not_seen(fun, obj): - if draw.label(obj) not in seen: - P(fun(obj)) - seen.add(draw.label(obj)) - - P(draw.head()) - for obj, adjacent in items(self): - if not adjacent: - if_not_seen(draw.terminal_node, obj) - for req in adjacent: - if_not_seen(draw.node, obj) - P(draw.edge(obj, req)) - P(draw.tail()) - - def format(self, obj): - return self.formatter(obj) if self.formatter else obj - - def __iter__(self): - return iter(self.adjacent) - - def __getitem__(self, node): - return self.adjacent[node] - - def __len__(self): - return len(self.adjacent) - - def __contains__(self, obj): - return obj in self.adjacent - - def _iterate_items(self): - return items(self.adjacent) - items = iteritems = _iterate_items - - def __repr__(self): - return '\n'.join(self.repr_node(N) for N in self) - - def repr_node(self, obj, level=1, fmt='{0}({1})'): - output = [fmt.format(obj, self.valency_of(obj))] - if obj in self: - for other in self[obj]: - d = fmt.format(other, self.valency_of(other)) - output.append(' ' * level + d) - output.extend(self.repr_node(other, level + 1).split('\n')[1:]) - return '\n'.join(output) - - -class AttributeDictMixin(object): - """Augment classes with a Mapping interface by adding attribute access. - - I.e. `d.key -> d[key]`. - - """ - - def __getattr__(self, k): - """`d.key -> d[key]`""" - try: - return self[k] - except KeyError: - raise AttributeError( - '{0!r} object has no attribute {1!r}'.format( - type(self).__name__, k)) - - def __setattr__(self, key, value): - """`d[key] = value -> d.key = value`""" - self[key] = value - - -class AttributeDict(dict, AttributeDictMixin): - """Dict subclass with attribute access.""" - pass - - -class DictAttribute(object): - """Dict interface to attributes. - - `obj[k] -> obj.k` - `obj[k] = val -> obj.k = val` - - """ - obj = None - - def __init__(self, obj): - object.__setattr__(self, 'obj', obj) - - def __getattr__(self, key): - return getattr(self.obj, key) - - def __setattr__(self, key, value): - return setattr(self.obj, key, value) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def setdefault(self, key, default): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def __getitem__(self, key): - try: - return getattr(self.obj, key) - except AttributeError: - raise KeyError(key) - - def __setitem__(self, key, value): - setattr(self.obj, key, value) - - def __contains__(self, key): - return hasattr(self.obj, key) - - def _iterate_keys(self): - return iter(dir(self.obj)) - iterkeys = _iterate_keys - - def __iter__(self): - return self._iterate_keys() - - def _iterate_items(self): - for key in self._iterate_keys(): - yield key, getattr(self.obj, key) - iteritems = _iterate_items - - def _iterate_values(self): - for key in self._iterate_keys(): - yield getattr(self.obj, key) - itervalues = _iterate_values - - if sys.version_info[0] == 3: # pragma: no cover - items = _iterate_items - keys = _iterate_keys - values = _iterate_values - else: - - def keys(self): - return list(self) - - def items(self): - return list(self._iterate_items()) - - def values(self): - return list(self._iterate_values()) -MutableMapping.register(DictAttribute) - - -class ConfigurationView(AttributeDictMixin): - """A view over an applications configuration dicts. - - Custom (but older) version of :class:`collections.ChainMap`. 
- - If the key does not exist in ``changes``, the ``defaults`` dicts - are consulted. - - :param changes: Dict containing changes to the configuration. - :param defaults: List of dicts containing the default configuration. - - """ - changes = None - defaults = None - _order = None - - def __init__(self, changes, defaults): - self.__dict__.update(changes=changes, defaults=defaults, - _order=[changes] + defaults) - - def add_defaults(self, d): - d = force_mapping(d) - self.defaults.insert(0, d) - self._order.insert(1, d) - - def __getitem__(self, key): - for d in self._order: - try: - return d[key] - except KeyError: - pass - raise KeyError(key) - - def __setitem__(self, key, value): - self.changes[key] = value - - def first(self, *keys): - return first(None, (self.get(key) for key in keys)) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def clear(self): - """Remove all changes, but keep defaults.""" - self.changes.clear() - - def setdefault(self, key, default): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def update(self, *args, **kwargs): - return self.changes.update(*args, **kwargs) - - def __contains__(self, key): - return any(key in m for m in self._order) - - def __bool__(self): - return any(self._order) - __nonzero__ = __bool__ # Py2 - - def __repr__(self): - return repr(dict(items(self))) - - def __iter__(self): - return self._iterate_keys() - - def __len__(self): - # The logic for iterating keys includes uniq(), - # so to be safe we count by explicitly iterating - return len(set().union(*self._order)) - - def _iter(self, op): - # defaults must be first in the stream, so values in - # changes takes precedence. - return chain(*[op(d) for d in reversed(self._order)]) - - def _iterate_keys(self): - return uniq(self._iter(lambda d: d)) - iterkeys = _iterate_keys - - def _iterate_items(self): - return ((key, self[key]) for key in self) - iteritems = _iterate_items - - def _iterate_values(self): - return (self[key] for key in self) - itervalues = _iterate_values - - if sys.version_info[0] == 3: # pragma: no cover - keys = _iterate_keys - items = _iterate_items - values = _iterate_values - - else: # noqa - def keys(self): - return list(self._iterate_keys()) - - def items(self): - return list(self._iterate_items()) - - def values(self): - return list(self._iterate_values()) - -MutableMapping.register(ConfigurationView) - - -class LimitedSet(object): - """Kind-of Set with limitations. - - Good for when you need to test for membership (`a in set`), - but the set should not grow unbounded. - - :keyword maxlen: Maximum number of members before we start - evicting expired members. - :keyword expires: Time in seconds, before a membership expires. - - """ - - def __init__(self, maxlen=None, expires=None, data=None, heap=None): - # heap is ignored - self.maxlen = maxlen - self.expires = expires - self._data = {} if data is None else data - self._heap = [] - - # make shortcuts - self.__len__ = self._heap.__len__ - self.__contains__ = self._data.__contains__ - - self._refresh_heap() - - def _refresh_heap(self): - self._heap[:] = [(t, key) for key, t in items(self._data)] - heapify(self._heap) - - def add(self, key, now=time.time, heappush=heappush): - """Add a new member.""" - # offset is there to modify the length of the list, - # this way we can expire an item before inserting the value, - # and it will end up in the correct order. 
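        # To illustrate (assuming no ``expires`` is set): with maxlen=2
        # and members {a, b}, adding c first evicts the oldest member,
        # then inserts c, so the set never grows past maxlen.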
- self.purge(1, offset=1) - inserted = now() - self._data[key] = inserted - heappush(self._heap, (inserted, key)) - - def clear(self): - """Remove all members""" - self._data.clear() - self._heap[:] = [] - - def discard(self, value): - """Remove membership by finding value.""" - try: - itime = self._data[value] - except KeyError: - return - try: - self._heap.remove((itime, value)) - except ValueError: - pass - self._data.pop(value, None) - pop_value = discard # XXX compat - - def purge(self, limit=None, offset=0, now=time.time): - """Purge expired items.""" - H, maxlen = self._heap, self.maxlen - if not maxlen: - return - - # If the data/heap gets corrupted and limit is None - # this will go into an infinite loop, so limit must - # have a value to guard the loop. - limit = len(self) + offset if limit is None else limit - - i = 0 - while len(self) + offset > maxlen: - if i >= limit: - break - try: - item = heappop(H) - except IndexError: - break - if self.expires: - if now() < item[0] + self.expires: - heappush(H, item) - break - try: - self._data.pop(item[1]) - except KeyError: # out of sync with heap - pass - i += 1 - - def update(self, other): - if isinstance(other, LimitedSet): - self._data.update(other._data) - self._refresh_heap() - else: - for obj in other: - self.add(obj) - - def as_dict(self): - return self._data - - def __eq__(self, other): - return self._heap == other._heap - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return 'LimitedSet({0})'.format(len(self)) - - def __iter__(self): - return (item[1] for item in self._heap) - - def __len__(self): - return len(self._heap) - - def __contains__(self, key): - return key in self._data - - def __reduce__(self): - return self.__class__, (self.maxlen, self.expires, self._data) -MutableSet.register(LimitedSet) diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py deleted file mode 100644 index 65809cf..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py +++ /dev/null @@ -1,408 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events - ~~~~~~~~~~~~~ - - Events is a stream of messages sent for certain actions occurring - in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT` - is enabled), used for monitoring purposes. - -""" -from __future__ import absolute_import - -import os -import time -import threading -import warnings - -from collections import deque -from contextlib import contextmanager -from copy import copy -from operator import itemgetter - -from kombu import Exchange, Queue, Producer -from kombu.connection import maybe_channel -from kombu.mixins import ConsumerMixin -from kombu.utils import cached_property - -from celery.app import app_or_default -from celery.utils import anon_nodename, uuid -from celery.utils.functional import dictfilter -from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms - -__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver'] - -event_exchange = Exchange('celeryev', type='topic') - -_TZGETTER = itemgetter('utcoffset', 'timestamp') - -W_YAJL = """ -anyjson is currently using the yajl library. -This json implementation is broken, it severely truncates floats -so timestamps will not work. - -Please uninstall yajl or force anyjson to use a different library. 
-""" - -CLIENT_CLOCK_SKEW = -1 - - -def get_exchange(conn): - ex = copy(event_exchange) - if conn.transport.driver_type == 'redis': - # quick hack for Issue #436 - ex.type = 'fanout' - return ex - - -def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields): - """Create an event. - - An event is a dictionary, the only required field is ``type``. - A ``timestamp`` field will be set to the current time if not provided. - - """ - event = __dict__(_fields, **fields) if _fields else fields - if 'timestamp' not in event: - event.update(timestamp=__now__(), type=type) - else: - event['type'] = type - return event - - -def group_from(type): - """Get the group part of an event type name. - - E.g.:: - - >>> group_from('task-sent') - 'task' - - >>> group_from('custom-my-event') - 'custom' - - """ - return type.split('-', 1)[0] - - -class EventDispatcher(object): - """Dispatches event messages. - - :param connection: Connection to the broker. - - :keyword hostname: Hostname to identify ourselves as, - by default uses the hostname returned by - :func:`~celery.utils.anon_nodename`. - - :keyword groups: List of groups to send events for. :meth:`send` will - ignore send requests to groups not in this list. - If this is :const:`None`, all events will be sent. Example groups - include ``"task"`` and ``"worker"``. - - :keyword enabled: Set to :const:`False` to not actually publish any events, - making :meth:`send` a noop operation. - - :keyword channel: Can be used instead of `connection` to specify - an exact channel to use when sending events. - - :keyword buffer_while_offline: If enabled events will be buffered - while the connection is down. :meth:`flush` must be called - as soon as the connection is re-established. - - You need to :meth:`close` this after use. - - """ - DISABLED_TRANSPORTS = set(['sql']) - - app = None - - # set of callbacks to be called when :meth:`enabled`. - on_enabled = None - - # set of callbacks to be called when :meth:`disabled`. 
- on_disabled = None - - def __init__(self, connection=None, hostname=None, enabled=True, - channel=None, buffer_while_offline=True, app=None, - serializer=None, groups=None): - self.app = app_or_default(app or self.app) - self.connection = connection - self.channel = channel - self.hostname = hostname or anon_nodename() - self.buffer_while_offline = buffer_while_offline - self.mutex = threading.Lock() - self.producer = None - self._outbound_buffer = deque() - self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER - self.on_enabled = set() - self.on_disabled = set() - self.groups = set(groups or []) - self.tzoffset = [-time.timezone, -time.altzone] - self.clock = self.app.clock - if not connection and channel: - self.connection = channel.connection.client - self.enabled = enabled - conninfo = self.connection or self.app.connection() - self.exchange = get_exchange(conninfo) - if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS: - self.enabled = False - if self.enabled: - self.enable() - self.headers = {'hostname': self.hostname} - self.pid = os.getpid() - self.warn_if_yajl() - - def warn_if_yajl(self): - import anyjson - if anyjson.implementation.name == 'yajl': - warnings.warn(UserWarning(W_YAJL)) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - - def enable(self): - self.producer = Producer(self.channel or self.connection, - exchange=self.exchange, - serializer=self.serializer) - self.enabled = True - for callback in self.on_enabled: - callback() - - def disable(self): - if self.enabled: - self.enabled = False - self.close() - for callback in self.on_disabled: - callback() - - def publish(self, type, fields, producer, retry=False, - retry_policy=None, blind=False, utcoffset=utcoffset, - Event=Event): - """Publish event using a custom :class:`~kombu.Producer` - instance. - - :param type: Event type name, with group separated by dash (`-`). - :param fields: Dictionary of event fields, must be json serializable. - :param producer: :class:`~kombu.Producer` instance to use, - only the ``publish`` method will be called. - :keyword retry: Retry in the event of connection failure. - :keyword retry_policy: Dict of custom retry policy, see - :meth:`~kombu.Connection.ensure`. - :keyword blind: Don't set logical clock value (also do not forward - the internal logical clock). - :keyword Event: Event type used to create event, - defaults to :func:`Event`. - :keyword utcoffset: Function returning the current utcoffset in hours. - - """ - - with self.mutex: - clock = None if blind else self.clock.forward() - event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), - pid=self.pid, clock=clock, **fields) - exchange = self.exchange - producer.publish( - event, - routing_key=type.replace('-', '.'), - exchange=exchange.name, - retry=retry, - retry_policy=retry_policy, - declare=[exchange], - serializer=self.serializer, - headers=self.headers, - ) - - def send(self, type, blind=False, **fields): - """Send event. - - :param type: Event type name, with group separated by dash (`-`). - :keyword retry: Retry in the event of connection failure. - :keyword retry_policy: Dict of custom retry policy, see - :meth:`~kombu.Connection.ensure`. - :keyword blind: Don't set logical clock value (also do not forward - the internal logical clock). - :keyword Event: Event type used to create event, - defaults to :func:`Event`. - :keyword utcoffset: Function returning the current utcoffset in hours. 
- :keyword \*\*fields: Event fields, must be json serializable. - - """ - if self.enabled: - groups = self.groups - if groups and group_from(type) not in groups: - return - try: - self.publish(type, fields, self.producer, blind) - except Exception as exc: - if not self.buffer_while_offline: - raise - self._outbound_buffer.append((type, fields, exc)) - - def flush(self): - """Flushes the outbound buffer.""" - while self._outbound_buffer: - try: - type, fields, _ = self._outbound_buffer.popleft() - except IndexError: - return - self.send(type, **fields) - - def extend_buffer(self, other): - """Copies the outbound buffer of another instance.""" - self._outbound_buffer.extend(other._outbound_buffer) - - def close(self): - """Close the event dispatcher.""" - self.mutex.locked() and self.mutex.release() - self.producer = None - - def _get_publisher(self): - return self.producer - - def _set_publisher(self, producer): - self.producer = producer - publisher = property(_get_publisher, _set_publisher) # XXX compat - - -class EventReceiver(ConsumerMixin): - """Capture events. - - :param connection: Connection to the broker. - :keyword handlers: Event handlers. - - :attr:`handlers` is a dict of event types and their handlers, - the special handler `"*"` captures all events that doesn't have a - handler. - - """ - app = None - - def __init__(self, channel, handlers=None, routing_key='#', - node_id=None, app=None, queue_prefix='celeryev', - accept=None): - self.app = app_or_default(app or self.app) - self.channel = maybe_channel(channel) - self.handlers = {} if handlers is None else handlers - self.routing_key = routing_key - self.node_id = node_id or uuid() - self.queue_prefix = queue_prefix - self.exchange = get_exchange(self.connection or self.app.connection()) - self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), - exchange=self.exchange, - routing_key=self.routing_key, - auto_delete=True, - durable=False, - queue_arguments=self._get_queue_arguments()) - self.clock = self.app.clock - self.adjust_clock = self.clock.adjust - self.forward_clock = self.clock.forward - if accept is None: - accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json']) - self.accept = accept - - def _get_queue_arguments(self): - conf = self.app.conf - return dictfilter({ - 'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL), - 'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES), - }) - - def process(self, type, event): - """Process the received event by dispatching it to the appropriate - handler.""" - handler = self.handlers.get(type) or self.handlers.get('*') - handler and handler(event) - - def get_consumers(self, Consumer, channel): - return [Consumer(queues=[self.queue], - callbacks=[self._receive], no_ack=True, - accept=self.accept)] - - def on_consume_ready(self, connection, channel, consumers, - wakeup=True, **kwargs): - if wakeup: - self.wakeup_workers(channel=channel) - - def itercapture(self, limit=None, timeout=None, wakeup=True): - return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) - - def capture(self, limit=None, timeout=None, wakeup=True): - """Open up a consumer capturing events. - - This has to run in the main process, and it will never stop - unless :attr:`EventDispatcher.should_stop` is set to True, or - forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. 
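        A typical capture loop, sketched (``on_event`` is a placeholder
        handler; ``app`` is your Celery app)::

            with app.connection() as connection:
                recv = app.events.Receiver(connection,
                                           handlers={'*': on_event})
                recv.capture(limit=None, timeout=None, wakeup=True)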
- - """ - return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup)) - - def wakeup_workers(self, channel=None): - self.app.control.broadcast('heartbeat', - connection=self.connection, - channel=channel) - - def event_from_message(self, body, localize=True, - now=time.time, tzfields=_TZGETTER, - adjust_timestamp=adjust_timestamp, - CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW): - type = body['type'] - if type == 'task-sent': - # clients never sync so cannot use their clock value - _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW - self.adjust_clock(_c) - else: - try: - clock = body['clock'] - except KeyError: - body['clock'] = self.forward_clock() - else: - self.adjust_clock(clock) - - if localize: - try: - offset, timestamp = tzfields(body) - except KeyError: - pass - else: - body['timestamp'] = adjust_timestamp(timestamp, offset) - body['local_received'] = now() - return type, body - - def _receive(self, body, message): - self.process(*self.event_from_message(body)) - - @property - def connection(self): - return self.channel.connection.client if self.channel else None - - -class Events(object): - - def __init__(self, app=None): - self.app = app - - @cached_property - def Receiver(self): - return self.app.subclass_with_self(EventReceiver, - reverse='events.Receiver') - - @cached_property - def Dispatcher(self): - return self.app.subclass_with_self(EventDispatcher, - reverse='events.Dispatcher') - - @cached_property - def State(self): - return self.app.subclass_with_self('celery.events.state:State', - reverse='events.State') - - @contextmanager - def default_dispatcher(self, hostname=None, enabled=True, - buffer_while_offline=False): - with self.app.amqp.producer_pool.acquire(block=True) as prod: - with self.Dispatcher(prod.connection, hostname, enabled, - prod.channel, buffer_while_offline) as d: - yield d diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py b/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py deleted file mode 100644 index 775f6a0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py +++ /dev/null @@ -1,544 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.cursesmon - ~~~~~~~~~~~~~~~~~~~~~~~ - - Graphical monitor of Celery events using curses. - -""" -from __future__ import absolute_import, print_function - -import curses -import sys -import threading - -from datetime import datetime -from itertools import count -from textwrap import wrap -from time import time -from math import ceil - -from celery import VERSION_BANNER -from celery import states -from celery.app import app_or_default -from celery.five import items, values -from celery.utils.text import abbr, abbrtask - -__all__ = ['CursesMonitor', 'evtop'] - -BORDER_SPACING = 4 -LEFT_BORDER_OFFSET = 3 -UUID_WIDTH = 36 -STATE_WIDTH = 8 -TIMESTAMP_WIDTH = 8 -MIN_WORKER_WIDTH = 15 -MIN_TASK_WIDTH = 16 - -# this module is considered experimental -# we don't care about coverage. 
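# In normal use this monitor is not imported directly; it is started
# from the command line (sketch):
#
#   $ celery -A proj events
#
# which ends up calling :func:`evtop` at the bottom of this module.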
- -STATUS_SCREEN = """\ -events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} -""" - - -class CursesMonitor(object): # pragma: no cover - keymap = {} - win = None - screen_width = None - screen_delay = 10 - selected_task = None - selected_position = 0 - selected_str = 'Selected: ' - foreground = curses.COLOR_BLACK - background = curses.COLOR_WHITE - online_str = 'Workers online: ' - help_title = 'Keys: ' - help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') - greet = 'celery events {0}'.format(VERSION_BANNER) - info_str = 'Info: ' - - def __init__(self, state, app, keymap=None): - self.app = app - self.keymap = keymap or self.keymap - self.state = state - default_keymap = {'J': self.move_selection_down, - 'K': self.move_selection_up, - 'C': self.revoke_selection, - 'T': self.selection_traceback, - 'R': self.selection_result, - 'I': self.selection_info, - 'L': self.selection_rate_limit} - self.keymap = dict(default_keymap, **self.keymap) - self.lock = threading.RLock() - - def format_row(self, uuid, task, worker, timestamp, state): - mx = self.display_width - - # include spacing - detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH - uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH - - if uuid_space < UUID_WIDTH: - uuid_width = uuid_space - else: - uuid_width = UUID_WIDTH - - detail_width = detail_width - uuid_width - 1 - task_width = int(ceil(detail_width / 2.0)) - worker_width = detail_width - task_width - 1 - - uuid = abbr(uuid, uuid_width).ljust(uuid_width) - worker = abbr(worker, worker_width).ljust(worker_width) - task = abbrtask(task, task_width).ljust(task_width) - state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) - timestamp = timestamp.ljust(TIMESTAMP_WIDTH) - - row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task, - timestamp, state) - if self.screen_width is None: - self.screen_width = len(row[:mx]) - return row[:mx] - - @property - def screen_width(self): - _, mx = self.win.getmaxyx() - return mx - - @property - def screen_height(self): - my, _ = self.win.getmaxyx() - return my - - @property - def display_width(self): - _, mx = self.win.getmaxyx() - return mx - BORDER_SPACING - - @property - def display_height(self): - my, _ = self.win.getmaxyx() - return my - 10 - - @property - def limit(self): - return self.display_height - - def find_position(self): - if not self.tasks: - return 0 - for i, e in enumerate(self.tasks): - if self.selected_task == e[0]: - return i - return 0 - - def move_selection_up(self): - self.move_selection(-1) - - def move_selection_down(self): - self.move_selection(1) - - def move_selection(self, direction=1): - if not self.tasks: - return - pos = self.find_position() - try: - self.selected_task = self.tasks[pos + direction][0] - except IndexError: - self.selected_task = self.tasks[0][0] - - keyalias = {curses.KEY_DOWN: 'J', - curses.KEY_UP: 'K', - curses.KEY_ENTER: 'I'} - - def handle_keypress(self): - try: - key = self.win.getkey().upper() - except: - return - key = self.keyalias.get(key) or key - handler = self.keymap.get(key) - if handler is not None: - handler() - - def alert(self, callback, title=None): - self.win.erase() - my, mx = self.win.getmaxyx() - y = blank_line = count(2) - if title: - self.win.addstr(next(y), 3, title, - curses.A_BOLD | curses.A_UNDERLINE) - next(blank_line) - callback(my, mx, next(y)) - self.win.addstr(my - 1, 0, 'Press any key to continue...', - curses.A_BOLD) - self.win.refresh() - while 1: - try: - return self.win.getkey().upper() - except: - pass - - def 
selection_rate_limit(self): - if not self.selected_task: - return curses.beep() - task = self.state.tasks[self.selected_task] - if not task.name: - return curses.beep() - - my, mx = self.win.getmaxyx() - r = 'New rate limit: ' - self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE) - self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r))) - rlimit = self.readline(my - 2, 3 + len(r)) - - if rlimit: - reply = self.app.control.rate_limit(task.name, - rlimit.strip(), reply=True) - self.alert_remote_control_reply(reply) - - def alert_remote_control_reply(self, reply): - - def callback(my, mx, xs): - y = count(xs) - if not reply: - self.win.addstr( - next(y), 3, 'No replies received in 1s deadline.', - curses.A_BOLD + curses.color_pair(2), - ) - return - - for subreply in reply: - curline = next(y) - - host, response = next(items(subreply)) - host = '{0}: '.format(host) - self.win.addstr(curline, 3, host, curses.A_BOLD) - attr = curses.A_NORMAL - text = '' - if 'error' in response: - text = response['error'] - attr |= curses.color_pair(2) - elif 'ok' in response: - text = response['ok'] - attr |= curses.color_pair(3) - self.win.addstr(curline, 3 + len(host), text, attr) - - return self.alert(callback, 'Remote Control Command Replies') - - def readline(self, x, y): - buffer = str() - curses.echo() - try: - i = 0 - while 1: - ch = self.win.getch(x, y + i) - if ch != -1: - if ch in (10, curses.KEY_ENTER): # enter - break - if ch in (27, ): - buffer = str() - break - buffer += chr(ch) - i += 1 - finally: - curses.noecho() - return buffer - - def revoke_selection(self): - if not self.selected_task: - return curses.beep() - reply = self.app.control.revoke(self.selected_task, reply=True) - self.alert_remote_control_reply(reply) - - def selection_info(self): - if not self.selected_task: - return - - def alert_callback(mx, my, xs): - my, mx = self.win.getmaxyx() - y = count(xs) - task = self.state.tasks[self.selected_task] - info = task.info(extra=['state']) - infoitems = [ - ('args', info.pop('args', None)), - ('kwargs', info.pop('kwargs', None)) - ] + list(info.items()) - for key, value in infoitems: - if key is None: - continue - value = str(value) - curline = next(y) - keys = key + ': ' - self.win.addstr(curline, 3, keys, curses.A_BOLD) - wrapped = wrap(value, mx - 2) - if len(wrapped) == 1: - self.win.addstr( - curline, len(keys) + 3, - abbr(wrapped[0], - self.screen_width - (len(keys) + 3))) - else: - for subline in wrapped: - nexty = next(y) - if nexty >= my - 1: - subline = ' ' * 4 + '[...]' - elif nexty >= my: - break - self.win.addstr( - nexty, 3, - abbr(' ' * 4 + subline, self.screen_width - 4), - curses.A_NORMAL, - ) - - return self.alert( - alert_callback, 'Task details for {0.selected_task}'.format(self), - ) - - def selection_traceback(self): - if not self.selected_task: - return curses.beep() - task = self.state.tasks[self.selected_task] - if task.state not in states.EXCEPTION_STATES: - return curses.beep() - - def alert_callback(my, mx, xs): - y = count(xs) - for line in task.traceback.split('\n'): - self.win.addstr(next(y), 3, line) - - return self.alert( - alert_callback, - 'Task Exception Traceback for {0.selected_task}'.format(self), - ) - - def selection_result(self): - if not self.selected_task: - return - - def alert_callback(my, mx, xs): - y = count(xs) - task = self.state.tasks[self.selected_task] - result = (getattr(task, 'result', None) or - getattr(task, 'exception', None)) - for line in wrap(result or '', mx - 2): - self.win.addstr(next(y), 3, line) - - return 
self.alert( - alert_callback, - 'Task Result for {0.selected_task}'.format(self), - ) - - def display_task_row(self, lineno, task): - state_color = self.state_colors.get(task.state) - attr = curses.A_NORMAL - if task.uuid == self.selected_task: - attr = curses.A_STANDOUT - timestamp = datetime.utcfromtimestamp( - task.timestamp or time(), - ) - timef = timestamp.strftime('%H:%M:%S') - hostname = task.worker.hostname if task.worker else '*NONE*' - line = self.format_row(task.uuid, task.name, - hostname, - timef, task.state) - self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr) - - if state_color: - self.win.addstr(lineno, - len(line) - STATE_WIDTH + BORDER_SPACING - 1, - task.state, state_color | attr) - - def draw(self): - with self.lock: - win = self.win - self.handle_keypress() - x = LEFT_BORDER_OFFSET - y = blank_line = count(2) - my, mx = win.getmaxyx() - win.erase() - win.bkgd(' ', curses.color_pair(1)) - win.border() - win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) - next(blank_line) - win.addstr(next(y), x, self.format_row('UUID', 'TASK', - 'WORKER', 'TIME', 'STATE'), - curses.A_BOLD | curses.A_UNDERLINE) - tasks = self.tasks - if tasks: - for row, (uuid, task) in enumerate(tasks): - if row > self.display_height: - break - - if task.uuid: - lineno = next(y) - self.display_task_row(lineno, task) - - # -- Footer - next(blank_line) - win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) - - # Selected Task Info - if self.selected_task: - win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) - info = 'Missing extended info' - detail = '' - try: - selection = self.state.tasks[self.selected_task] - except KeyError: - pass - else: - info = selection.info() - if 'runtime' in info: - info['runtime'] = '{0:.2f}'.format(info['runtime']) - if 'result' in info: - info['result'] = abbr(info['result'], 16) - info = ' '.join( - '{0}={1}'.format(key, value) - for key, value in items(info) - ) - detail = '... 
-> key i' - infowin = abbr(info, - self.screen_width - len(self.selected_str) - 2, - detail) - win.addstr(my - 5, x + len(self.selected_str), infowin) - # Make ellipsis bold - if detail in infowin: - detailpos = len(infowin) - len(detail) - win.addstr(my - 5, x + len(self.selected_str) + detailpos, - detail, curses.A_BOLD) - else: - win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) - - # Workers - if self.workers: - win.addstr(my - 4, x, self.online_str, curses.A_BOLD) - win.addstr(my - 4, x + len(self.online_str), - ', '.join(sorted(self.workers)), curses.A_NORMAL) - else: - win.addstr(my - 4, x, 'No workers discovered.') - - # Info - win.addstr(my - 3, x, self.info_str, curses.A_BOLD) - win.addstr( - my - 3, x + len(self.info_str), - STATUS_SCREEN.format( - s=self.state, - w_alive=len([w for w in values(self.state.workers) - if w.alive]), - w_all=len(self.state.workers), - ), - curses.A_DIM, - ) - - # Help - self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) - self.safe_add_str(my - 2, x + len(self.help_title), self.help, - curses.A_DIM) - win.refresh() - - def safe_add_str(self, y, x, string, *args, **kwargs): - if x + len(string) > self.screen_width: - string = string[:self.screen_width - x] - self.win.addstr(y, x, string, *args, **kwargs) - - def init_screen(self): - with self.lock: - self.win = curses.initscr() - self.win.nodelay(True) - self.win.keypad(True) - curses.start_color() - curses.init_pair(1, self.foreground, self.background) - # exception states - curses.init_pair(2, curses.COLOR_RED, self.background) - # successful state - curses.init_pair(3, curses.COLOR_GREEN, self.background) - # revoked state - curses.init_pair(4, curses.COLOR_MAGENTA, self.background) - # greeting - curses.init_pair(5, curses.COLOR_BLUE, self.background) - # started state - curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) - - self.state_colors = {states.SUCCESS: curses.color_pair(3), - states.REVOKED: curses.color_pair(4), - states.STARTED: curses.color_pair(6)} - for state in states.EXCEPTION_STATES: - self.state_colors[state] = curses.color_pair(2) - - curses.cbreak() - - def resetscreen(self): - with self.lock: - curses.nocbreak() - self.win.keypad(False) - curses.echo() - curses.endwin() - - def nap(self): - curses.napms(self.screen_delay) - - @property - def tasks(self): - return list(self.state.tasks_by_time(limit=self.limit)) - - @property - def workers(self): - return [hostname for hostname, w in items(self.state.workers) - if w.alive] - - -class DisplayThread(threading.Thread): # pragma: no cover - - def __init__(self, display): - self.display = display - self.shutdown = False - threading.Thread.__init__(self) - - def run(self): - while not self.shutdown: - self.display.draw() - self.display.nap() - - -def capture_events(app, state, display): # pragma: no cover - - def on_connection_error(exc, interval): - print('Connection Error: {0!r}. 
Retry in {1}s.'.format( - exc, interval), file=sys.stderr) - - while 1: - print('-> evtop: starting capture...', file=sys.stderr) - with app.connection() as conn: - try: - conn.ensure_connection(on_connection_error, - app.conf.BROKER_CONNECTION_MAX_RETRIES) - recv = app.events.Receiver(conn, handlers={'*': state.event}) - display.resetscreen() - display.init_screen() - recv.capture() - except conn.connection_errors + conn.channel_errors as exc: - print('Connection lost: {0!r}'.format(exc), file=sys.stderr) - - -def evtop(app=None): # pragma: no cover - app = app_or_default(app) - state = app.events.State() - display = CursesMonitor(state, app) - display.init_screen() - refresher = DisplayThread(display) - refresher.start() - try: - capture_events(app, state, display) - except Exception: - refresher.shutdown = True - refresher.join() - display.resetscreen() - raise - except (KeyboardInterrupt, SystemExit): - refresher.shutdown = True - refresher.join() - display.resetscreen() - - -if __name__ == '__main__': # pragma: no cover - evtop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py b/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py deleted file mode 100644 index 323afc4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.dumper - ~~~~~~~~~~~~~~~~~~~~ - - This is a simple program that dumps events to the console - as they happen. Think of it like a `tcpdump` for Celery events. - -""" -from __future__ import absolute_import, print_function - -import sys - -from datetime import datetime - -from celery.app import app_or_default -from celery.utils.functional import LRUCache -from celery.utils.timeutils import humanize_seconds - -__all__ = ['Dumper', 'evdump'] - -TASK_NAMES = LRUCache(limit=0xFFF) - -HUMAN_TYPES = {'worker-offline': 'shutdown', - 'worker-online': 'started', - 'worker-heartbeat': 'heartbeat'} - -CONNECTION_ERROR = """\ --> Cannot connect to %s: %s. -Trying again %s -""" - - -def humanize_type(type): - try: - return HUMAN_TYPES[type.lower()] - except KeyError: - return type.lower().replace('-', ' ') - - -class Dumper(object): - - def __init__(self, out=sys.stdout): - self.out = out - - def say(self, msg): - print(msg, file=self.out) - # need to flush so that output can be piped. 
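        # e.g. (sketch):  celery -A proj events --dump | grep task-failed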
- try: - self.out.flush() - except AttributeError: - pass - - def on_event(self, ev): - timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) - type = ev.pop('type').lower() - hostname = ev.pop('hostname') - if type.startswith('task-'): - uuid = ev.pop('uuid') - if type in ('task-received', 'task-sent'): - task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \ - .format(ev.pop('name'), uuid, - ev.pop('args'), - ev.pop('kwargs')) - else: - task = TASK_NAMES.get(uuid, '') - return self.format_task_event(hostname, timestamp, - type, task, ev) - fields = ', '.join( - '{0}={1}'.format(key, ev[key]) for key in sorted(ev) - ) - sep = fields and ':' or '' - self.say('{0} [{1}] {2}{3} {4}'.format( - hostname, timestamp, humanize_type(type), sep, fields), - ) - - def format_task_event(self, hostname, timestamp, type, task, event): - fields = ', '.join( - '{0}={1}'.format(key, event[key]) for key in sorted(event) - ) - sep = fields and ':' or '' - self.say('{0} [{1}] {2}{3} {4} {5}'.format( - hostname, timestamp, humanize_type(type), sep, task, fields), - ) - - -def evdump(app=None, out=sys.stdout): - app = app_or_default(app) - dumper = Dumper(out=out) - dumper.say('-> evdump: starting capture...') - conn = app.connection().clone() - - def _error_handler(exc, interval): - dumper.say(CONNECTION_ERROR % ( - conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ') - )) - - while 1: - try: - conn.ensure_connection(_error_handler) - recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) - recv.capture() - except (KeyboardInterrupt, SystemExit): - return conn and conn.close() - except conn.connection_errors + conn.channel_errors: - dumper.say('-> Connection lost, attempting reconnect') - -if __name__ == '__main__': # pragma: no cover - evdump() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py deleted file mode 100644 index 0dd4155..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.snapshot - ~~~~~~~~~~~~~~~~~~~~~~ - - Consuming the events as a stream is not always suitable - so this module implements a system to take snapshots of the - state of a cluster at regular intervals. There is a full - implementation of this writing the snapshots to a database - in :mod:`djcelery.snapshots` in the `django-celery` distribution. 
- -""" -from __future__ import absolute_import - -from kombu.utils.limits import TokenBucket - -from celery import platforms -from celery.app import app_or_default -from celery.utils.timer2 import Timer -from celery.utils.dispatch import Signal -from celery.utils.imports import instantiate -from celery.utils.log import get_logger -from celery.utils.timeutils import rate - -__all__ = ['Polaroid', 'evcam'] - -logger = get_logger('celery.evcam') - - -class Polaroid(object): - timer = None - shutter_signal = Signal(providing_args=('state', )) - cleanup_signal = Signal() - clear_after = False - - _tref = None - _ctref = None - - def __init__(self, state, freq=1.0, maxrate=None, - cleanup_freq=3600.0, timer=None, app=None): - self.app = app_or_default(app) - self.state = state - self.freq = freq - self.cleanup_freq = cleanup_freq - self.timer = timer or self.timer or Timer() - self.logger = logger - self.maxrate = maxrate and TokenBucket(rate(maxrate)) - - def install(self): - self._tref = self.timer.call_repeatedly(self.freq, self.capture) - self._ctref = self.timer.call_repeatedly( - self.cleanup_freq, self.cleanup, - ) - - def on_shutter(self, state): - pass - - def on_cleanup(self): - pass - - def cleanup(self): - logger.debug('Cleanup: Running...') - self.cleanup_signal.send(None) - self.on_cleanup() - - def shutter(self): - if self.maxrate is None or self.maxrate.can_consume(): - logger.debug('Shutter: %s', self.state) - self.shutter_signal.send(self.state) - self.on_shutter(self.state) - - def capture(self): - self.state.freeze_while(self.shutter, clear_after=self.clear_after) - - def cancel(self): - if self._tref: - self._tref() # flush all received events. - self._tref.cancel() - if self._ctref: - self._ctref.cancel() - - def __enter__(self): - self.install() - return self - - def __exit__(self, *exc_info): - self.cancel() - - -def evcam(camera, freq=1.0, maxrate=None, loglevel=0, - logfile=None, pidfile=None, timer=None, app=None): - app = app_or_default(app) - - if pidfile: - platforms.create_pidlock(pidfile) - - app.log.setup_logging_subsystem(loglevel, logfile) - - print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format( - camera, freq)) - state = app.events.State() - cam = instantiate(camera, state, app=app, freq=freq, - maxrate=maxrate, timer=timer) - cam.install() - conn = app.connection() - recv = app.events.Receiver(conn, handlers={'*': state.event}) - try: - try: - recv.capture(limit=None) - except KeyboardInterrupt: - raise SystemExit - finally: - cam.cancel() - conn.close() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/state.py b/thesisenv/lib/python3.6/site-packages/celery/events/state.py deleted file mode 100644 index c78f2d0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/state.py +++ /dev/null @@ -1,656 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.state - ~~~~~~~~~~~~~~~~~~~ - - This module implements a datastructure used to keep - track of the state of a cluster of workers and the tasks - it is working on (by consuming events). - - For every event consumed the state is updated, - so the state represents the state of the cluster - at the time of the last event. - - Snapshots (:mod:`celery.events.snapshot`) can be used to - take "pictures" of this state at regular intervals - to e.g. store that in a database. 
- -""" -from __future__ import absolute_import - -import bisect -import sys -import threading - -from datetime import datetime -from decimal import Decimal -from itertools import islice -from operator import itemgetter -from time import time -from weakref import ref - -from kombu.clocks import timetuple -from kombu.utils import cached_property, kwdict - -from celery import states -from celery.five import class_property, items, values -from celery.utils import deprecated -from celery.utils.functional import LRUCache, memoize -from celery.utils.log import get_logger - -PYPY = hasattr(sys, 'pypy_version_info') - -# The window (in percentage) is added to the workers heartbeat -# frequency. If the time between updates exceeds this window, -# then the worker is considered to be offline. -HEARTBEAT_EXPIRE_WINDOW = 200 - -# Max drift between event timestamp and time of event received -# before we alert that clocks may be unsynchronized. -HEARTBEAT_DRIFT_MAX = 16 - -DRIFT_WARNING = """\ -Substantial drift from %s may mean clocks are out of sync. Current drift is -%s seconds. [orig: %s recv: %s] -""" - -CAN_KWDICT = sys.version_info >= (2, 6, 5) - -logger = get_logger(__name__) -warn = logger.warning - -R_STATE = '' -R_WORKER = ' HEARTBEAT_DRIFT_MAX: - _warn_drift(self.hostname, drift, - local_received, timestamp) - if local_received: - hearts = len(heartbeats) - if hearts > hbmax - 1: - hb_pop(0) - if hearts and local_received > heartbeats[-1]: - hb_append(local_received) - else: - insort(heartbeats, local_received) - return event - - def update(self, f, **kw): - for k, v in items(dict(f, **kw) if kw else f): - setattr(self, k, v) - - def __repr__(self): - return R_WORKER.format(self) - - @property - def status_string(self): - return 'ONLINE' if self.alive else 'OFFLINE' - - @property - def heartbeat_expires(self): - return heartbeat_expires(self.heartbeats[-1], - self.freq, self.expire_window) - - @property - def alive(self, nowfun=time): - return bool(self.heartbeats and nowfun() < self.heartbeat_expires) - - @property - def id(self): - return '{0.hostname}.{0.pid}'.format(self) - - @deprecated(3.2, 3.3) - def update_heartbeat(self, received, timestamp): - self.event(None, timestamp, received) - - @deprecated(3.2, 3.3) - def on_online(self, timestamp=None, local_received=None, **fields): - self.event('online', timestamp, local_received, fields) - - @deprecated(3.2, 3.3) - def on_offline(self, timestamp=None, local_received=None, **fields): - self.event('offline', timestamp, local_received, fields) - - @deprecated(3.2, 3.3) - def on_heartbeat(self, timestamp=None, local_received=None, **fields): - self.event('heartbeat', timestamp, local_received, fields) - - @class_property - def _defaults(cls): - """Deprecated, to be removed in 3.3""" - source = cls() - return dict((k, getattr(source, k)) for k in cls._fields) - - -@with_unique_field('uuid') -class Task(object): - """Task State.""" - name = received = sent = started = succeeded = failed = retried = \ - revoked = args = kwargs = eta = expires = retries = worker = result = \ - exception = timestamp = runtime = traceback = exchange = \ - routing_key = client = None - state = states.PENDING - clock = 0 - - _fields = ('uuid', 'name', 'state', 'received', 'sent', 'started', - 'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs', - 'eta', 'expires', 'retries', 'worker', 'result', 'exception', - 'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key', - 'clock', 'client') - if not PYPY: - __slots__ = ('__dict__', '__weakref__') - - #: 
How to merge out of order events. - #: Disorder is detected by logical ordering (e.g. :event:`task-received` - #: must have happened before a :event:`task-failed` event). - #: - #: A merge rule consists of a state and a list of fields to keep from - #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args - #: fields are always taken from the RECEIVED state, and any values for - #: these fields received before or after is simply ignored. - merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs', - 'retries', 'eta', 'expires')} - - #: meth:`info` displays these fields by default. - _info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime', - 'expires', 'exception', 'exchange', 'routing_key') - - def __init__(self, uuid=None, **kwargs): - self.uuid = uuid - if kwargs: - for k, v in items(kwargs): - setattr(self, k, v) - - def event(self, type_, timestamp=None, local_received=None, fields=None, - precedence=states.precedence, items=items, dict=dict, - PENDING=states.PENDING, RECEIVED=states.RECEIVED, - STARTED=states.STARTED, FAILURE=states.FAILURE, - RETRY=states.RETRY, SUCCESS=states.SUCCESS, - REVOKED=states.REVOKED): - fields = fields or {} - if type_ == 'sent': - state, self.sent = PENDING, timestamp - elif type_ == 'received': - state, self.received = RECEIVED, timestamp - elif type_ == 'started': - state, self.started = STARTED, timestamp - elif type_ == 'failed': - state, self.failed = FAILURE, timestamp - elif type_ == 'retried': - state, self.retried = RETRY, timestamp - elif type_ == 'succeeded': - state, self.succeeded = SUCCESS, timestamp - elif type_ == 'revoked': - state, self.revoked = REVOKED, timestamp - else: - state = type_.upper() - - # note that precedence here is reversed - # see implementation in celery.states.state.__lt__ - if state != RETRY and self.state != RETRY and \ - precedence(state) > precedence(self.state): - # this state logically happens-before the current state, so merge. 
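# Concretely (a short sketch assuming the celery 3.1 API defined in this
# module): a task-received event that arrives after a task-failed event
# merges only its whitelisted fields and does not demote the state.

from celery import states as _states
from celery.events.state import Task as _Task

_t = _Task('a1b2')
_t.event('failed', timestamp=2.0)
_t.event('received', timestamp=1.0, fields={'name': 'tasks.add',
                                            'args': '(2, 2)'})
assert _t.state == _states.FAILURE  # FAILURE outranks RECEIVED
assert _t.name == 'tasks.add'       # 'name' is kept via merge_rules[RECEIVED]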
- keep = self.merge_rules.get(state) - if keep is not None: - fields = dict( - (k, v) for k, v in items(fields) if k in keep - ) - for key, value in items(fields): - setattr(self, key, value) - else: - self.state = state - self.timestamp = timestamp - for key, value in items(fields): - setattr(self, key, value) - - def info(self, fields=None, extra=[]): - """Information about this task suitable for on-screen display.""" - fields = self._info_fields if fields is None else fields - - def _keys(): - for key in list(fields) + list(extra): - value = getattr(self, key, None) - if value is not None: - yield key, value - - return dict(_keys()) - - def __repr__(self): - return R_TASK.format(self) - - def as_dict(self): - get = object.__getattribute__ - return dict( - (k, get(self, k)) for k in self._fields - ) - - def __reduce__(self): - return _depickle_task, (self.__class__, self.as_dict()) - - @property - def origin(self): - return self.client if self.worker is None else self.worker.id - - @property - def ready(self): - return self.state in states.READY_STATES - - @deprecated(3.2, 3.3) - def on_sent(self, timestamp=None, **fields): - self.event('sent', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_received(self, timestamp=None, **fields): - self.event('received', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_started(self, timestamp=None, **fields): - self.event('started', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_failed(self, timestamp=None, **fields): - self.event('failed', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_retried(self, timestamp=None, **fields): - self.event('retried', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_succeeded(self, timestamp=None, **fields): - self.event('succeeded', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_revoked(self, timestamp=None, **fields): - self.event('revoked', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_unknown_event(self, shortype, timestamp=None, **fields): - self.event(shortype, timestamp, fields) - - @deprecated(3.2, 3.3) - def update(self, state, timestamp, fields, - _state=states.state, RETRY=states.RETRY): - return self.event(state, timestamp, None, fields) - - @deprecated(3.2, 3.3) - def merge(self, state, timestamp, fields): - keep = self.merge_rules.get(state) - if keep is not None: - fields = dict((k, v) for k, v in items(fields) if k in keep) - for key, value in items(fields): - setattr(self, key, value) - - @class_property - def _defaults(cls): - """Deprecated, to be removed in 3.3.""" - source = cls() - return dict((k, getattr(source, k)) for k in source._fields) - - -class State(object): - """Records clusters state.""" - Worker = Worker - Task = Task - event_count = 0 - task_count = 0 - heap_multiplier = 4 - - def __init__(self, callback=None, - workers=None, tasks=None, taskheap=None, - max_workers_in_memory=5000, max_tasks_in_memory=10000, - on_node_join=None, on_node_leave=None): - self.event_callback = callback - self.workers = (LRUCache(max_workers_in_memory) - if workers is None else workers) - self.tasks = (LRUCache(max_tasks_in_memory) - if tasks is None else tasks) - self._taskheap = [] if taskheap is None else taskheap - self.max_workers_in_memory = max_workers_in_memory - self.max_tasks_in_memory = max_tasks_in_memory - self.on_node_join = on_node_join - self.on_node_leave = on_node_leave - self._mutex = threading.Lock() - self.handlers = {} - self._seen_types = set() - self.rebuild_taskheap() - - @cached_property - def _event(self): - return 
self._create_dispatcher() - - def freeze_while(self, fun, *args, **kwargs): - clear_after = kwargs.pop('clear_after', False) - with self._mutex: - try: - return fun(*args, **kwargs) - finally: - if clear_after: - self._clear() - - def clear_tasks(self, ready=True): - with self._mutex: - return self._clear_tasks(ready) - - def _clear_tasks(self, ready=True): - if ready: - in_progress = dict( - (uuid, task) for uuid, task in self.itertasks() - if task.state not in states.READY_STATES) - self.tasks.clear() - self.tasks.update(in_progress) - else: - self.tasks.clear() - self._taskheap[:] = [] - - def _clear(self, ready=True): - self.workers.clear() - self._clear_tasks(ready) - self.event_count = 0 - self.task_count = 0 - - def clear(self, ready=True): - with self._mutex: - return self._clear(ready) - - def get_or_create_worker(self, hostname, **kwargs): - """Get or create worker by hostname. - - Return tuple of ``(worker, was_created)``. - """ - try: - worker = self.workers[hostname] - if kwargs: - worker.update(kwargs) - return worker, False - except KeyError: - worker = self.workers[hostname] = self.Worker( - hostname, **kwargs) - return worker, True - - def get_or_create_task(self, uuid): - """Get or create task by uuid.""" - try: - return self.tasks[uuid], False - except KeyError: - task = self.tasks[uuid] = self.Task(uuid) - return task, True - - def event(self, event): - with self._mutex: - return self._event(event) - - def task_event(self, type_, fields): - """Deprecated, use :meth:`event`.""" - return self._event(dict(fields, type='-'.join(['task', type_])))[0] - - def worker_event(self, type_, fields): - """Deprecated, use :meth:`event`.""" - return self._event(dict(fields, type='-'.join(['worker', type_])))[0] - - def _create_dispatcher(self): - get_handler = self.handlers.__getitem__ - event_callback = self.event_callback - wfields = itemgetter('hostname', 'timestamp', 'local_received') - tfields = itemgetter('uuid', 'hostname', 'timestamp', - 'local_received', 'clock') - taskheap = self._taskheap - th_append = taskheap.append - th_pop = taskheap.pop - # Removing events from task heap is an O(n) operation, - # so easier to just account for the common number of events - # for each task (PENDING->RECEIVED->STARTED->final) - #: an O(n) operation - max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier - add_type = self._seen_types.add - on_node_join, on_node_leave = self.on_node_join, self.on_node_leave - tasks, Task = self.tasks, self.Task - workers, Worker = self.workers, self.Worker - # avoid updating LRU entry at getitem - get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ - - def _event(event, - timetuple=timetuple, KeyError=KeyError, - insort=bisect.insort, created=True): - self.event_count += 1 - if event_callback: - event_callback(self, event) - group, _, subject = event['type'].partition('-') - try: - handler = get_handler(group) - except KeyError: - pass - else: - return handler(subject, event), subject - - if group == 'worker': - try: - hostname, timestamp, local_received = wfields(event) - except KeyError: - pass - else: - is_offline = subject == 'offline' - try: - worker, created = get_worker(hostname), False - except KeyError: - if is_offline: - worker, created = Worker(hostname), False - else: - worker = workers[hostname] = Worker(hostname) - worker.event(subject, timestamp, local_received, event) - if on_node_join and (created or subject == 'online'): - on_node_join(worker) - if on_node_leave and is_offline: - on_node_leave(worker) - 
workers.pop(hostname, None) - return (worker, created), subject - elif group == 'task': - (uuid, hostname, timestamp, - local_received, clock) = tfields(event) - # task-sent event is sent by client, not worker - is_client_event = subject == 'sent' - try: - task, created = get_task(uuid), False - except KeyError: - task = tasks[uuid] = Task(uuid) - if is_client_event: - task.client = hostname - else: - try: - worker, created = get_worker(hostname), False - except KeyError: - worker = workers[hostname] = Worker(hostname) - task.worker = worker - if worker is not None and local_received: - worker.event(None, local_received, timestamp) - - origin = hostname if is_client_event else worker.id - - # remove oldest event if exceeding the limit. - heaps = len(taskheap) - if heaps + 1 > max_events_in_heap: - th_pop(0) - - # most events will be dated later than the previous. - timetup = timetuple(clock, timestamp, origin, ref(task)) - if heaps and timetup > taskheap[-1]: - th_append(timetup) - else: - insort(taskheap, timetup) - - if subject == 'received': - self.task_count += 1 - task.event(subject, timestamp, local_received, event) - task_name = task.name - if task_name is not None: - add_type(task_name) - return (task, created), subject - return _event - - def rebuild_taskheap(self, timetuple=timetuple): - heap = self._taskheap[:] = [ - timetuple(t.clock, t.timestamp, t.origin, ref(t)) - for t in values(self.tasks) - ] - heap.sort() - - def itertasks(self, limit=None): - for index, row in enumerate(items(self.tasks)): - yield row - if limit and index + 1 >= limit: - break - - def tasks_by_time(self, limit=None): - """Generator giving tasks ordered by time, - in ``(uuid, Task)`` tuples.""" - seen = set() - for evtup in islice(reversed(self._taskheap), 0, limit): - task = evtup[3]() - if task is not None: - uuid = task.uuid - if uuid not in seen: - yield uuid, task - seen.add(uuid) - tasks_by_timestamp = tasks_by_time - - def tasks_by_type(self, name, limit=None): - """Get all tasks by type. - - Return a list of ``(uuid, Task)`` tuples. - - """ - return islice( - ((uuid, task) for uuid, task in self.tasks_by_time() - if task.name == name), - 0, limit, - ) - - def tasks_by_worker(self, hostname, limit=None): - """Get all tasks by worker. - - """ - return islice( - ((uuid, task) for uuid, task in self.tasks_by_time() - if task.worker.hostname == hostname), - 0, limit, - ) - - def task_types(self): - """Return a list of all seen task types.""" - return sorted(self._seen_types) - - def alive_workers(self): - """Return a list of (seemingly) alive workers.""" - return [w for w in values(self.workers) if w.alive] - - def __repr__(self): - return R_STATE.format(self) - - def __reduce__(self): - return self.__class__, ( - self.event_callback, self.workers, self.tasks, None, - self.max_workers_in_memory, self.max_tasks_in_memory, - self.on_node_join, self.on_node_leave, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/exceptions.py deleted file mode 100644 index ab65019..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/exceptions.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.exceptions - ~~~~~~~~~~~~~~~~~ - - This module contains all exceptions used by the Celery API. 
- -""" -from __future__ import absolute_import - -import numbers - -from .five import string_t - -from billiard.exceptions import ( # noqa - SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, -) - -__all__ = ['SecurityError', 'Ignore', 'QueueNotFound', - 'WorkerShutdown', 'WorkerTerminate', - 'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered', - 'TimeoutError', 'MaxRetriesExceededError', 'Retry', - 'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored', - 'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning', - 'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning', - 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', - 'Terminated'] - -UNREGISTERED_FMT = """\ -Task of kind {0} is not registered, please make sure it's imported.\ -""" - - -class SecurityError(Exception): - """Security related exceptions. - - Handle with care. - - """ - - -class Ignore(Exception): - """A task can raise this to ignore doing state updates.""" - - -class Reject(Exception): - """A task can raise this if it wants to reject/requeue the message.""" - - def __init__(self, reason=None, requeue=False): - self.reason = reason - self.requeue = requeue - super(Reject, self).__init__(reason, requeue) - - def __repr__(self): - return 'reject requeue=%s: %s' % (self.requeue, self.reason) - - -class WorkerTerminate(SystemExit): - """Signals that the worker should terminate immediately.""" -SystemTerminate = WorkerTerminate # XXX compat - - -class WorkerShutdown(SystemExit): - """Signals that the worker should perform a warm shutdown.""" - - -class QueueNotFound(KeyError): - """Task routed to a queue not in CELERY_QUEUES.""" - - -class ImproperlyConfigured(ImportError): - """Celery is somehow improperly configured.""" - - -class NotRegistered(KeyError): - """The task is not registered.""" - - def __repr__(self): - return UNREGISTERED_FMT.format(self) - - -class AlreadyRegistered(Exception): - """The task is already registered.""" - - -class TimeoutError(Exception): - """The operation timed out.""" - - -class MaxRetriesExceededError(Exception): - """The tasks max restart limit has been exceeded.""" - - -class Retry(Exception): - """The task is to be retried later.""" - - #: Optional message describing context of retry. - message = None - - #: Exception (if any) that caused the retry to happen. - exc = None - - #: Time of retry (ETA), either :class:`numbers.Real` or - #: :class:`~datetime.datetime`. 
- when = None - - def __init__(self, message=None, exc=None, when=None, **kwargs): - from kombu.utils.encoding import safe_repr - self.message = message - if isinstance(exc, string_t): - self.exc, self.excs = None, exc - else: - self.exc, self.excs = exc, safe_repr(exc) if exc else None - self.when = when - Exception.__init__(self, exc, when, **kwargs) - - def humanize(self): - if isinstance(self.when, numbers.Real): - return 'in {0.when}s'.format(self) - return 'at {0.when}'.format(self) - - def __str__(self): - if self.message: - return self.message - if self.excs: - return 'Retry {0}: {1}'.format(self.humanize(), self.excs) - return 'Retry {0}'.format(self.humanize()) - - def __reduce__(self): - return self.__class__, (self.message, self.excs, self.when) -RetryTaskError = Retry # XXX compat - - -class TaskRevokedError(Exception): - """The task has been revoked, so no result available.""" - - -class NotConfigured(UserWarning): - """Celery has not been configured, as no config module has been found.""" - - -class AlwaysEagerIgnored(UserWarning): - """send_task ignores CELERY_ALWAYS_EAGER option""" - - -class InvalidTaskError(Exception): - """The task has invalid data or is not properly constructed.""" - - -class IncompleteStream(Exception): - """Found the end of a stream of data, but the data is not yet complete.""" - - -class ChordError(Exception): - """A task part of the chord raised an exception.""" - - -class CPendingDeprecationWarning(PendingDeprecationWarning): - pass - - -class CDeprecationWarning(DeprecationWarning): - pass - - -class FixupWarning(UserWarning): - pass - - -class DuplicateNodenameWarning(UserWarning): - """Multiple workers are using the same nodename.""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/five.py b/thesisenv/lib/python3.6/site-packages/celery/five.py deleted file mode 100644 index 2406920..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/five.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.five - ~~~~~~~~~~~ - - Compatibility implementations of features - only available in newer Python versions. 
- - -""" -from __future__ import absolute_import - -import io -import operator -import sys - -from importlib import import_module -from types import ModuleType - -from kombu.five import monotonic - -try: - from collections import Counter -except ImportError: # pragma: no cover - from collections import defaultdict - - def Counter(): # noqa - return defaultdict(int) - -__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty', - 'zip_longest', 'map', 'string', 'string_t', - 'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values', - 'nextfun', 'reraise', 'WhateverIO', 'with_metaclass', - 'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d', - 'class_property', 'reclassmethod', 'create_module', - 'recreate_module', 'monotonic'] - -# ############# py3k ######################################################### -PY3 = sys.version_info[0] == 3 - -try: - reload = reload # noqa -except NameError: # pragma: no cover - from imp import reload # noqa - -try: - from UserList import UserList # noqa -except ImportError: # pragma: no cover - from collections import UserList # noqa - -try: - from UserDict import UserDict # noqa -except ImportError: # pragma: no cover - from collections import UserDict # noqa - - -if PY3: # pragma: no cover - import builtins - - from queue import Queue, Empty - from itertools import zip_longest - - map = map - string = str - string_t = str - long_t = int - text_t = str - range = range - int_types = (int, ) - _byte_t = bytes - - open_fqdn = 'builtins.open' - - def items(d): - return d.items() - - def keys(d): - return d.keys() - - def values(d): - return d.values() - - def nextfun(it): - return it.__next__ - - exec_ = getattr(builtins, 'exec') - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - import __builtin__ as builtins # noqa - from Queue import Queue, Empty # noqa - from itertools import imap as map, izip_longest as zip_longest # noqa - string = unicode # noqa - string_t = basestring # noqa - text_t = unicode # noqa - long_t = long # noqa - range = xrange # noqa - int_types = (int, long) # noqa - _byte_t = (str, bytes) # noqa - - open_fqdn = '__builtin__.open' - - def items(d): # noqa - return d.iteritems() - - def keys(d): # noqa - return d.iterkeys() - - def values(d): # noqa - return d.itervalues() - - def nextfun(it): # noqa - return it.next - - def exec_(code, globs=None, locs=None): # pragma: no cover - """Execute code in a namespace.""" - if globs is None: - frame = sys._getframe(1) - globs = frame.f_globals - if locs is None: - locs = frame.f_locals - del frame - elif locs is None: - locs = globs - exec("""exec code in globs, locs""") - - exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") - - -def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): - """Class decorator to set metaclass. - - Works with both Python 2 and Python 3 and it does not add - an extra class in the lookup order like ``six.with_metaclass`` does - (that is -- it copies the original class instead of using inheritance). 
- - """ - - def _clone_with_metaclass(Class): - attrs = dict((key, value) for key, value in items(vars(Class)) - if key not in skip_attrs) - return Type(Class.__name__, Class.__bases__, attrs) - - return _clone_with_metaclass - - -# ############# collections.OrderedDict ###################################### -# was moved to kombu -from kombu.utils.compat import OrderedDict # noqa - -# ############# threading.TIMEOUT_MAX ######################################## -try: - from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX -except ImportError: - THREAD_TIMEOUT_MAX = 1e10 # noqa - -# ############# format(int, ',d') ############################################ - -if sys.version_info >= (2, 7): # pragma: no cover - def format_d(i): - return format(i, ',d') -else: # pragma: no cover - def format_d(i): # noqa - s = '%d' % i - groups = [] - while s and s[-1].isdigit(): - groups.append(s[-3:]) - s = s[:-3] - return s + ','.join(reversed(groups)) - - -# ############# Module Generation ############################################ - -# Utilities to dynamically -# recreate modules, either for lazy loading or -# to create old modules at runtime instead of -# having them litter the source tree. - -# import fails in python 2.5. fallback to reduce in stdlib -try: - from functools import reduce -except ImportError: - pass - -MODULE_DEPRECATED = """ -The module %s is deprecated and will be removed in a future version. -""" - -DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__']) - -# im_func is no longer available in Py3. -# instead the unbound method itself can be used. -if sys.version_info[0] == 3: # pragma: no cover - def fun_of_method(method): - return method -else: - def fun_of_method(method): # noqa - return method.im_func - - -def getappattr(path): - """Gets attribute from the current_app recursively, - e.g. 
getappattr('amqp.get_task_consumer')``.""" - from celery import current_app - return current_app._rgetattr(path) - - -def _compat_task_decorator(*args, **kwargs): - from celery import current_app - kwargs.setdefault('accept_magic_kwargs', True) - return current_app.task(*args, **kwargs) - - -def _compat_periodic_task_decorator(*args, **kwargs): - from celery.task import periodic_task - kwargs.setdefault('accept_magic_kwargs', True) - return periodic_task(*args, **kwargs) - - -COMPAT_MODULES = { - 'celery': { - 'execute': { - 'send_task': 'send_task', - }, - 'decorators': { - 'task': _compat_task_decorator, - 'periodic_task': _compat_periodic_task_decorator, - }, - 'log': { - 'get_default_logger': 'log.get_default_logger', - 'setup_logger': 'log.setup_logger', - 'setup_logging_subsystem': 'log.setup_logging_subsystem', - 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', - }, - 'messaging': { - 'TaskPublisher': 'amqp.TaskPublisher', - 'TaskConsumer': 'amqp.TaskConsumer', - 'establish_connection': 'connection', - 'get_consumer_set': 'amqp.TaskConsumer', - }, - 'registry': { - 'tasks': 'tasks', - }, - }, - 'celery.task': { - 'control': { - 'broadcast': 'control.broadcast', - 'rate_limit': 'control.rate_limit', - 'time_limit': 'control.time_limit', - 'ping': 'control.ping', - 'revoke': 'control.revoke', - 'discard_all': 'control.purge', - 'inspect': 'control.inspect', - }, - 'schedules': 'celery.schedules', - 'chords': 'celery.canvas', - } -} - - -class class_property(object): - - def __init__(self, getter=None, setter=None): - if getter is not None and not isinstance(getter, classmethod): - getter = classmethod(getter) - if setter is not None and not isinstance(setter, classmethod): - setter = classmethod(setter) - self.__get = getter - self.__set = setter - - info = getter.__get__(object) # just need the info attrs. 
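# Usage sketch (class_property as defined in this module, importable from
# celery.five in 3.1): the getter works on the class and on instances, and
# the attribute copying below is what preserves the getter's docstring.

from celery.five import class_property

class _Defaults(object):
    @class_property
    def fields(cls):
        """Default field set."""
        return ('uuid', 'name')

assert _Defaults.fields == ('uuid', 'name')    # no instance required
assert _Defaults().fields == ('uuid', 'name')  # instances work too
assert 'Default field set.' in _Defaults.__dict__['fields'].__doc__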
- self.__doc__ = info.__doc__ - self.__name__ = info.__name__ - self.__module__ = info.__module__ - - def __get__(self, obj, type=None): - if obj and type is None: - type = obj.__class__ - return self.__get.__get__(obj, type)() - - def __set__(self, obj, value): - if obj is None: - return self - return self.__set.__get__(obj)(value) - - def setter(self, setter): - return self.__class__(self.__get, setter) - - -def reclassmethod(method): - return classmethod(fun_of_method(method)) - - -class LazyModule(ModuleType): - _compat_modules = () - _all_by_module = {} - _direct = {} - _object_origins = {} - - def __getattr__(self, name): - if name in self._object_origins: - module = __import__(self._object_origins[name], None, None, [name]) - for item in self._all_by_module[module.__name__]: - setattr(self, item, getattr(module, item)) - return getattr(module, name) - elif name in self._direct: # pragma: no cover - module = __import__(self._direct[name], None, None, [name]) - setattr(self, name, module) - return module - return ModuleType.__getattribute__(self, name) - - def __dir__(self): - return list(set(self.__all__) | DEFAULT_ATTRS) - - def __reduce__(self): - return import_module, (self.__name__, ) - - -def create_module(name, attrs, cls_attrs=None, pkg=None, - base=LazyModule, prepare_attr=None): - fqdn = '.'.join([pkg.__name__, name]) if pkg else name - cls_attrs = {} if cls_attrs is None else cls_attrs - pkg, _, modname = name.rpartition('.') - cls_attrs['__module__'] = pkg - - attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr) - for attr_name, attr in items(attrs)) - module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn) - module.__dict__.update(attrs) - return module - - -def recreate_module(name, compat_modules=(), by_module={}, direct={}, - base=LazyModule, **attrs): - old_module = sys.modules[name] - origins = get_origins(by_module) - compat_modules = COMPAT_MODULES.get(name, ()) - - cattrs = dict( - _compat_modules=compat_modules, - _all_by_module=by_module, _direct=direct, - _object_origins=origins, - __all__=tuple(set(reduce( - operator.add, - [tuple(v) for v in [compat_modules, origins, direct, attrs]], - ))), - ) - new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) - new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod)) - for mod in compat_modules)) - return old_module, new_module - - -def get_compat_module(pkg, name): - from .local import Proxy - - def prepare(attr): - if isinstance(attr, string_t): - return Proxy(getappattr, (attr, )) - return attr - - attrs = COMPAT_MODULES[pkg.__name__][name] - if isinstance(attrs, string_t): - fqdn = '.'.join([pkg.__name__, name]) - module = sys.modules[fqdn] = import_module(attrs) - return module - attrs['__all__'] = list(attrs) - return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) - - -def get_origins(defs): - origins = {} - for module, attrs in items(defs): - origins.update(dict((attr, module) for attr in attrs)) - return origins - - -_SIO_write = io.StringIO.write -_SIO_init = io.StringIO.__init__ - - -class WhateverIO(io.StringIO): - - def __init__(self, v=None, *a, **kw): - _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw) - - def write(self, data): - _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data) diff --git a/thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py b/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py deleted file mode 100644 index 73c5c28..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py +++ /dev/null @@ -1,266 +0,0 @@ -from __future__ import absolute_import - -import os -import sys -import warnings - -from kombu.utils import cached_property, symbol_by_name - -from datetime import datetime -from importlib import import_module - -from celery import signals -from celery.exceptions import FixupWarning - -if sys.version_info[0] < 3 and not hasattr(sys, 'pypy_version_info'): - from StringIO import StringIO -else: - from io import StringIO - -__all__ = ['DjangoFixup', 'fixup'] - -ERR_NOT_INSTALLED = """\ -Environment variable DJANGO_SETTINGS_MODULE is defined -but Django is not installed. Will not apply Django fixups! -""" - - -def _maybe_close_fd(fh): - try: - os.close(fh.fileno()) - except (AttributeError, OSError, TypeError): - # TypeError added for celery#962 - pass - - -def fixup(app, env='DJANGO_SETTINGS_MODULE'): - SETTINGS_MODULE = os.environ.get(env) - if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): - try: - import django # noqa - except ImportError: - warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) - else: - return DjangoFixup(app).install() - - -class DjangoFixup(object): - - def __init__(self, app): - self.app = app - self.app.set_default() - self._worker_fixup = None - - def install(self): - # Need to add project directory to path - sys.path.append(os.getcwd()) - - self.app.loader.now = self.now - self.app.loader.mail_admins = self.mail_admins - - signals.import_modules.connect(self.on_import_modules) - signals.worker_init.connect(self.on_worker_init) - return self - - @cached_property - def worker_fixup(self): - if self._worker_fixup is None: - self._worker_fixup = DjangoWorkerFixup(self.app) - return self._worker_fixup - - def on_import_modules(self, **kwargs): - # call django.setup() before task modules are imported - self.worker_fixup.validate_models() - - def on_worker_init(self, **kwargs): - self.worker_fixup.install() - - def now(self, utc=False): - return datetime.utcnow() if utc else self._now() - - def mail_admins(self, subject, body, fail_silently=False, **kwargs): - return self._mail_admins(subject, body, fail_silently=fail_silently) - - @cached_property - def _mail_admins(self): - return symbol_by_name('django.core.mail:mail_admins') - - @cached_property - def _now(self): - try: - return symbol_by_name('django.utils.timezone:now') - except (AttributeError, ImportError): # pre django-1.4 - return datetime.now - - -class DjangoWorkerFixup(object): - _db_recycles = 0 - - def __init__(self, app): - self.app = app - self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) - self._db = import_module('django.db') - self._cache = import_module('django.core.cache') - self._settings = symbol_by_name('django.conf:settings') - - # Database-related exceptions. 
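# The block below gathers per-driver DB-API exception classes via optional
# imports. Condensed into a standalone sketch (PEP 249 guarantees these
# three attributes on conforming driver modules):

import importlib

def _collect_db_errors(drivers=('MySQLdb', 'psycopg2',
                                'sqlite3', 'cx_Oracle')):
    errors = []
    for name in drivers:
        try:
            mod = importlib.import_module(name)
        except ImportError:
            continue  # driver not installed: contributes nothing
        errors.extend((mod.DatabaseError, mod.InterfaceError,
                       mod.OperationalError))
    return tuple(errors)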
- DatabaseError = symbol_by_name('django.db:DatabaseError') - try: - import MySQLdb as mysql - _my_database_errors = (mysql.DatabaseError, - mysql.InterfaceError, - mysql.OperationalError) - except ImportError: - _my_database_errors = () # noqa - try: - import psycopg2 as pg - _pg_database_errors = (pg.DatabaseError, - pg.InterfaceError, - pg.OperationalError) - except ImportError: - _pg_database_errors = () # noqa - try: - import sqlite3 - _lite_database_errors = (sqlite3.DatabaseError, - sqlite3.InterfaceError, - sqlite3.OperationalError) - except ImportError: - _lite_database_errors = () # noqa - try: - import cx_Oracle as oracle - _oracle_database_errors = (oracle.DatabaseError, - oracle.InterfaceError, - oracle.OperationalError) - except ImportError: - _oracle_database_errors = () # noqa - - try: - self._close_old_connections = symbol_by_name( - 'django.db:close_old_connections', - ) - except (ImportError, AttributeError): - self._close_old_connections = None - self.database_errors = ( - (DatabaseError, ) + - _my_database_errors + - _pg_database_errors + - _lite_database_errors + - _oracle_database_errors - ) - - def validate_models(self): - import django - try: - django_setup = django.setup - except AttributeError: - pass - else: - django_setup() - s = StringIO() - try: - from django.core.management.validation import get_validation_errors - except ImportError: - from django.core.management.base import BaseCommand - cmd = BaseCommand() - try: - # since django 1.5 - from django.core.management.base import OutputWrapper - cmd.stdout = OutputWrapper(sys.stdout) - cmd.stderr = OutputWrapper(sys.stderr) - except ImportError: - cmd.stdout, cmd.stderr = sys.stdout, sys.stderr - - cmd.check() - else: - num_errors = get_validation_errors(s, None) - if num_errors: - raise RuntimeError( - 'One or more Django models did not validate:\n{0}'.format( - s.getvalue())) - - def install(self): - signals.beat_embedded_init.connect(self.close_database) - signals.worker_ready.connect(self.on_worker_ready) - signals.task_prerun.connect(self.on_task_prerun) - signals.task_postrun.connect(self.on_task_postrun) - signals.worker_process_init.connect(self.on_worker_process_init) - self.close_database() - self.close_cache() - return self - - def on_worker_process_init(self, **kwargs): - # Child process must validate models again if on Windows, - # or if they were started using execv. - if os.environ.get('FORKED_BY_MULTIPROCESSING'): - self.validate_models() - - # close connections: - # the parent process may have established these, - # so need to close them. - - # calling db.close() on some DB connections will cause - # the inherited DB conn to also get broken in the parent - # process so we need to remove it without triggering any - # network IO that close() might cause. 
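# After fork() the child shares the parent's sockets, and a driver-level
# close() can send a protocol goodbye over that shared socket and break the
# parent's session; os.close() merely drops the descriptor. A sketch of the
# same idea as _maybe_close_fd above:

import os

def _close_fd_only(raw_connection):
    try:
        os.close(raw_connection.fileno())  # release the fd, send nothing
    except (AttributeError, OSError, TypeError):
        pass  # no fileno(), already closed, or a mocked connection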
- try: - for c in self._db.connections.all(): - if c and c.connection: - _maybe_close_fd(c.connection) - except AttributeError: - if self._db.connection and self._db.connection.connection: - _maybe_close_fd(self._db.connection.connection) - - # use the _ version to avoid DB_REUSE preventing the conn.close() call - self._close_database() - self.close_cache() - - def on_task_prerun(self, sender, **kwargs): - """Called before every task.""" - if not getattr(sender.request, 'is_eager', False): - self.close_database() - - def on_task_postrun(self, sender, **kwargs): - # See http://groups.google.com/group/django-users/ - # browse_thread/thread/78200863d0c07c6d/ - if not getattr(sender.request, 'is_eager', False): - self.close_database() - self.close_cache() - - def close_database(self, **kwargs): - if self._close_old_connections: - return self._close_old_connections() # Django 1.6 - if not self.db_reuse_max: - return self._close_database() - if self._db_recycles >= self.db_reuse_max * 2: - self._db_recycles = 0 - self._close_database() - self._db_recycles += 1 - - def _close_database(self): - try: - funs = [conn.close for conn in self._db.connections.all()] - except AttributeError: - if hasattr(self._db, 'close_old_connections'): # django 1.6 - funs = [self._db.close_old_connections] - else: - # pre multidb, pending deprication in django 1.6 - funs = [self._db.close_connection] - - for close in funs: - try: - close() - except self.database_errors as exc: - str_exc = str(exc) - if 'closed' not in str_exc and 'not connected' not in str_exc: - raise - - def close_cache(self): - try: - self._cache.cache.close() - except (TypeError, AttributeError): - pass - - def on_worker_ready(self, **kwargs): - if self._settings.DEBUG: - warnings.warn('Using settings.DEBUG leads to a memory leak, never ' - 'use this setting in production environments!') diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py deleted file mode 100644 index 2a39ba2..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders - ~~~~~~~~~~~~~~ - - Loaders define how configuration is read, what happens - when workers start, when tasks are executed and so on. - -""" -from __future__ import absolute_import - -from celery._state import current_app -from celery.utils import deprecated -from celery.utils.imports import symbol_by_name, import_from_cwd - -__all__ = ['get_loader_cls'] - -LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader', - 'default': 'celery.loaders.default:Loader', - 'django': 'djcelery.loaders:DjangoLoader'} - - -def get_loader_cls(loader): - """Get loader class by name/alias""" - return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd) - - -@deprecated(deprecation=2.5, removal=4.0, - alternative='celery.current_app.loader') -def current_loader(): - return current_app.loader - - -@deprecated(deprecation=2.5, removal=4.0, - alternative='celery.current_app.conf') -def load_settings(): - return current_app.conf diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py deleted file mode 100644 index 87f034b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.app - ~~~~~~~~~~~~~~~~~~ - - The default loader used with custom app instances. 
- -""" -from __future__ import absolute_import - -from .base import BaseLoader - -__all__ = ['AppLoader'] - - -class AppLoader(BaseLoader): - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py deleted file mode 100644 index 401be7b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py +++ /dev/null @@ -1,299 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.base - ~~~~~~~~~~~~~~~~~~~ - - Loader base class. - -""" -from __future__ import absolute_import - -import anyjson -import imp as _imp -import importlib -import os -import re -import sys - -from datetime import datetime - -from kombu.utils import cached_property -from kombu.utils.encoding import safe_str - -from celery import signals -from celery.datastructures import DictAttribute, force_mapping -from celery.five import reraise, string_t -from celery.utils.functional import maybe_list -from celery.utils.imports import ( - import_from_cwd, symbol_by_name, NotAPackage, find_module, -) - -__all__ = ['BaseLoader'] - -_RACE_PROTECTION = False -CONFIG_INVALID_NAME = """\ -Error: Module '{module}' doesn't exist, or it's not a valid \ -Python module name. -""" - -CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ -Did you mean '{suggest}'? -""" - - -class BaseLoader(object): - """The base class for loaders. - - Loaders handles, - - * Reading celery client/worker configurations. - - * What happens when a task starts? - See :meth:`on_task_init`. - - * What happens when the worker starts? - See :meth:`on_worker_init`. - - * What happens when the worker shuts down? - See :meth:`on_worker_shutdown`. - - * What modules are imported to find tasks? - - """ - builtin_modules = frozenset() - configured = False - override_backends = {} - worker_initialized = False - - _conf = None - - def __init__(self, app, **kwargs): - self.app = app - self.task_modules = set() - - def now(self, utc=True): - if utc: - return datetime.utcnow() - return datetime.now() - - def on_task_init(self, task_id, task): - """This method is called before a task is executed.""" - pass - - def on_process_cleanup(self): - """This method is called after a task is executed.""" - pass - - def on_worker_init(self): - """This method is called when the worker (:program:`celery worker`) - starts.""" - pass - - def on_worker_shutdown(self): - """This method is called when the worker (:program:`celery worker`) - shuts down.""" - pass - - def on_worker_process_init(self): - """This method is called when a child process starts.""" - pass - - def import_task_module(self, module): - self.task_modules.add(module) - return self.import_from_cwd(module) - - def import_module(self, module, package=None): - return importlib.import_module(module, package=package) - - def import_from_cwd(self, module, imp=None, package=None): - return import_from_cwd( - module, - self.import_module if imp is None else imp, - package=package, - ) - - def import_default_modules(self): - signals.import_modules.send(sender=self.app) - return [ - self.import_task_module(m) for m in ( - tuple(self.builtin_modules) + - tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) + - tuple(maybe_list(self.app.conf.CELERY_INCLUDE)) - ) - ] - - def init_worker(self): - if not self.worker_initialized: - self.worker_initialized = True - self.import_default_modules() - self.on_worker_init() - - def shutdown_worker(self): - self.on_worker_shutdown() - - def init_worker_process(self): - self.on_worker_process_init() - - def 
config_from_object(self, obj, silent=False): - if isinstance(obj, string_t): - try: - obj = self._smart_import(obj, imp=self.import_from_cwd) - except (ImportError, AttributeError): - if silent: - return False - raise - self._conf = force_mapping(obj) - return True - - def _smart_import(self, path, imp=None): - imp = self.import_module if imp is None else imp - if ':' in path: - # Path includes attribute so can just jump here. - # e.g. ``os.path:abspath``. - return symbol_by_name(path, imp=imp) - - # Not sure if path is just a module name or if it includes an - # attribute name (e.g. ``os.path``, vs, ``os.path.abspath``). - try: - return imp(path) - except ImportError: - # Not a module name, so try module + attribute. - return symbol_by_name(path, imp=imp) - - def _import_config_module(self, name): - try: - self.find_module(name) - except NotAPackage: - if name.endswith('.py'): - reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( - module=name, suggest=name[:-3])), sys.exc_info()[2]) - reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format( - module=name)), sys.exc_info()[2]) - else: - return self.import_from_cwd(name) - - def find_module(self, module): - return find_module(module) - - def cmdline_config_parser( - self, args, namespace='celery', - re_type=re.compile(r'\((\w+)\)'), - extra_types={'json': anyjson.loads}, - override_types={'tuple': 'json', - 'list': 'json', - 'dict': 'json'}): - from celery.app.defaults import Option, NAMESPACES - namespace = namespace.upper() - typemap = dict(Option.typemap, **extra_types) - - def getarg(arg): - """Parse a single configuration definition from - the command-line.""" - - # ## find key/value - # ns.key=value|ns_key=value (case insensitive) - key, value = arg.split('=', 1) - key = key.upper().replace('.', '_') - - # ## find namespace. - # .key=value|_key=value expands to default namespace. - if key[0] == '_': - ns, key = namespace, key[1:] - else: - # find namespace part of key - ns, key = key.split('_', 1) - - ns_key = (ns and ns + '_' or '') + key - - # (type)value makes cast to custom type. - cast = re_type.match(value) - if cast: - type_ = cast.groups()[0] - type_ = override_types.get(type_, type_) - value = value[len(cast.group()):] - value = typemap[type_](value) - else: - try: - value = NAMESPACES[ns][key].to_python(value) - except ValueError as exc: - # display key name in error message. 
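# For reference, a usage sketch of this grammar (hypothetical values,
# assuming celery 3.1's defaults table; exact output depends on NAMESPACES):

from celery import Celery

_loader = Celery('sketch').loader
_cfg = _loader.cmdline_config_parser([
    'celeryd.concurrency=4',     # namespaced key, cast by its Option type
    '.result_backend=redis://',  # leading '.' expands to the CELERY_ prefix
])
# expected, roughly: {'CELERYD_CONCURRENCY': 4,
#                     'CELERY_RESULT_BACKEND': 'redis://'}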
- raise ValueError('{0!r}: {1}'.format(ns_key, exc)) - return ns_key, value - return dict(getarg(arg) for arg in args) - - def mail_admins(self, subject, body, fail_silently=False, - sender=None, to=None, host=None, port=None, - user=None, password=None, timeout=None, - use_ssl=False, use_tls=False, charset='utf-8'): - message = self.mail.Message(sender=sender, to=to, - subject=safe_str(subject), - body=safe_str(body), - charset=charset) - mailer = self.mail.Mailer(host=host, port=port, - user=user, password=password, - timeout=timeout, use_ssl=use_ssl, - use_tls=use_tls) - mailer.send(message, fail_silently=fail_silently) - - def read_configuration(self, env='CELERY_CONFIG_MODULE'): - try: - custom_config = os.environ[env] - except KeyError: - pass - else: - if custom_config: - usercfg = self._import_config_module(custom_config) - return DictAttribute(usercfg) - return {} - - def autodiscover_tasks(self, packages, related_name='tasks'): - self.task_modules.update( - mod.__name__ for mod in autodiscover_tasks(packages or (), - related_name) if mod) - - @property - def conf(self): - """Loader configuration.""" - if self._conf is None: - self._conf = self.read_configuration() - return self._conf - - @cached_property - def mail(self): - return self.import_module('celery.utils.mail') - - -def autodiscover_tasks(packages, related_name='tasks'): - global _RACE_PROTECTION - - if _RACE_PROTECTION: - return () - _RACE_PROTECTION = True - try: - return [find_related_module(pkg, related_name) for pkg in packages] - finally: - _RACE_PROTECTION = False - - -def find_related_module(package, related_name): - """Given a package name and a module name, tries to find that - module.""" - - # Django 1.7 allows for speciying a class name in INSTALLED_APPS. - # (Issue #2248). - try: - importlib.import_module(package) - except ImportError: - package, _, _ = package.rpartition('.') - - try: - pkg_path = importlib.import_module(package).__path__ - except AttributeError: - return - - try: - _imp.find_module(related_name, pkg_path) - except ImportError: - return - - return importlib.import_module('{0}.{1}'.format(package, related_name)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py deleted file mode 100644 index 6071480..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.default - ~~~~~~~~~~~~~~~~~~~~~~ - - The default loader used when no custom app has been initialized. - -""" -from __future__ import absolute_import - -import os -import warnings - -from celery.datastructures import DictAttribute -from celery.exceptions import NotConfigured -from celery.utils import strtobool - -from .base import BaseLoader - -__all__ = ['Loader', 'DEFAULT_CONFIG_MODULE'] - -DEFAULT_CONFIG_MODULE = 'celeryconfig' - -#: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set. 
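# For context, the module being looked up is plain Python; a minimal,
# hypothetical celeryconfig.py could read as follows (placeholder values),
# and setting CELERY_CONFIG_MODULE in the environment swaps the module name:

BROKER_URL = 'amqp://guest@localhost//'
CELERY_RESULT_BACKEND = 'rpc://'
CELERY_IMPORTS = ('proj.tasks', )  # placeholder dotted path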
-C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False)) - - -class Loader(BaseLoader): - """The loader used by the default app.""" - - def setup_settings(self, settingsdict): - return DictAttribute(settingsdict) - - def read_configuration(self, fail_silently=True): - """Read configuration from :file:`celeryconfig.py` and configure - celery and Django so it can be used by regular Python.""" - configname = os.environ.get('CELERY_CONFIG_MODULE', - DEFAULT_CONFIG_MODULE) - try: - usercfg = self._import_config_module(configname) - except ImportError: - if not fail_silently: - raise - # billiard sets this if forked using execv - if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): - warnings.warn(NotConfigured( - 'No {module} module found! Please make sure it exists and ' - 'is available to Python.'.format(module=configname))) - return self.setup_settings({}) - else: - self.configured = True - return self.setup_settings(usercfg) diff --git a/thesisenv/lib/python3.6/site-packages/celery/local.py b/thesisenv/lib/python3.6/site-packages/celery/local.py deleted file mode 100644 index 50da8bc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/local.py +++ /dev/null @@ -1,373 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.local - ~~~~~~~~~~~~ - - This module contains critical utilities that - needs to be loaded as soon as possible, and that - shall not load any third party modules. - - Parts of this module is Copyright by Werkzeug Team. - -""" -from __future__ import absolute_import - -import importlib -import sys - -from .five import string - -__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate'] - -__module__ = __name__ # used by Proxy class body - -PY3 = sys.version_info[0] == 3 - - -def _default_cls_attr(name, type_, cls_value): - # Proxy uses properties to forward the standard - # class attributes __module__, __name__ and __doc__ to the real - # object, but these needs to be a string when accessed from - # the Proxy class directly. This is a hack to make that work. - # -- See Issue #1087. - - def __new__(cls, getter): - instance = type_.__new__(cls, cls_value) - instance.__getter = getter - return instance - - def __get__(self, obj, cls=None): - return self.__getter(obj) if obj is not None else self - - return type(name, (type_, ), { - '__new__': __new__, '__get__': __get__, - }) - - -def try_import(module, default=None): - """Try to import and return module, or return - None if the module does not exist.""" - try: - return importlib.import_module(module) - except ImportError: - return default - - -class Proxy(object): - """Proxy to another object.""" - - # Code stolen from werkzeug.local.Proxy. 
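# Usage sketch (celery 3.1's celery.local): the target is resolved on every
# access, so the proxy always reflects the current object.

from celery.local import Proxy

_registry = {'obj': 'first'}
_current = Proxy(lambda: _registry['obj'])

assert str(_current) == 'first'
_registry['obj'] = 'second'
assert str(_current) == 'second'  # re-resolved on each access, never cached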
- __slots__ = ('__local', '__args', '__kwargs', '__dict__') - - def __init__(self, local, - args=None, kwargs=None, name=None, __doc__=None): - object.__setattr__(self, '_Proxy__local', local) - object.__setattr__(self, '_Proxy__args', args or ()) - object.__setattr__(self, '_Proxy__kwargs', kwargs or {}) - if name is not None: - object.__setattr__(self, '__custom_name__', name) - if __doc__ is not None: - object.__setattr__(self, '__doc__', __doc__) - - @_default_cls_attr('name', str, __name__) - def __name__(self): - try: - return self.__custom_name__ - except AttributeError: - return self._get_current_object().__name__ - - @_default_cls_attr('module', str, __module__) - def __module__(self): - return self._get_current_object().__module__ - - @_default_cls_attr('doc', str, __doc__) - def __doc__(self): - return self._get_current_object().__doc__ - - def _get_class(self): - return self._get_current_object().__class__ - - @property - def __class__(self): - return self._get_class() - - def _get_current_object(self): - """Return the current object. This is useful if you want the real - object behind the proxy at a time for performance reasons or because - you want to pass the object into a different context. - """ - loc = object.__getattribute__(self, '_Proxy__local') - if not hasattr(loc, '__release_local__'): - return loc(*self.__args, **self.__kwargs) - try: - return getattr(loc, self.__name__) - except AttributeError: - raise RuntimeError('no object bound to {0.__name__}'.format(self)) - - @property - def __dict__(self): - try: - return self._get_current_object().__dict__ - except RuntimeError: # pragma: no cover - raise AttributeError('__dict__') - - def __repr__(self): - try: - obj = self._get_current_object() - except RuntimeError: # pragma: no cover - return '<{0} unbound>'.format(self.__class__.__name__) - return repr(obj) - - def __bool__(self): - try: - return bool(self._get_current_object()) - except RuntimeError: # pragma: no cover - return False - __nonzero__ = __bool__ # Py2 - - def __unicode__(self): - try: - return string(self._get_current_object()) - except RuntimeError: # pragma: no cover - return repr(self) - - def __dir__(self): - try: - return dir(self._get_current_object()) - except RuntimeError: # pragma: no cover - return [] - - def __getattr__(self, name): - if name == '__members__': - return dir(self._get_current_object()) - return getattr(self._get_current_object(), name) - - def __setitem__(self, key, value): - self._get_current_object()[key] = value - - def __delitem__(self, key): - del self._get_current_object()[key] - - def __setslice__(self, i, j, seq): - self._get_current_object()[i:j] = seq - - def __delslice__(self, i, j): - del self._get_current_object()[i:j] - - def __setattr__(self, name, value): - setattr(self._get_current_object(), name, value) - - def __delattr__(self, name): - delattr(self._get_current_object(), name) - - def __str__(self): - return str(self._get_current_object()) - - def __lt__(self, other): - return self._get_current_object() < other - - def __le__(self, other): - return self._get_current_object() <= other - - def __eq__(self, other): - return self._get_current_object() == other - - def __ne__(self, other): - return self._get_current_object() != other - - def __gt__(self, other): - return self._get_current_object() > other - - def __ge__(self, other): - return self._get_current_object() >= other - - def __hash__(self): - return hash(self._get_current_object()) - - def __call__(self, *a, **kw): - return 
self._get_current_object()(*a, **kw) - - def __len__(self): - return len(self._get_current_object()) - - def __getitem__(self, i): - return self._get_current_object()[i] - - def __iter__(self): - return iter(self._get_current_object()) - - def __contains__(self, i): - return i in self._get_current_object() - - def __getslice__(self, i, j): - return self._get_current_object()[i:j] - - def __add__(self, other): - return self._get_current_object() + other - - def __sub__(self, other): - return self._get_current_object() - other - - def __mul__(self, other): - return self._get_current_object() * other - - def __floordiv__(self, other): - return self._get_current_object() // other - - def __mod__(self, other): - return self._get_current_object() % other - - def __divmod__(self, other): - return self._get_current_object().__divmod__(other) - - def __pow__(self, other): - return self._get_current_object() ** other - - def __lshift__(self, other): - return self._get_current_object() << other - - def __rshift__(self, other): - return self._get_current_object() >> other - - def __and__(self, other): - return self._get_current_object() & other - - def __xor__(self, other): - return self._get_current_object() ^ other - - def __or__(self, other): - return self._get_current_object() | other - - def __div__(self, other): - return self._get_current_object().__div__(other) - - def __truediv__(self, other): - return self._get_current_object().__truediv__(other) - - def __neg__(self): - return -(self._get_current_object()) - - def __pos__(self): - return +(self._get_current_object()) - - def __abs__(self): - return abs(self._get_current_object()) - - def __invert__(self): - return ~(self._get_current_object()) - - def __complex__(self): - return complex(self._get_current_object()) - - def __int__(self): - return int(self._get_current_object()) - - def __float__(self): - return float(self._get_current_object()) - - def __oct__(self): - return oct(self._get_current_object()) - - def __hex__(self): - return hex(self._get_current_object()) - - def __index__(self): - return self._get_current_object().__index__() - - def __coerce__(self, other): - return self._get_current_object().__coerce__(other) - - def __enter__(self): - return self._get_current_object().__enter__() - - def __exit__(self, *a, **kw): - return self._get_current_object().__exit__(*a, **kw) - - def __reduce__(self): - return self._get_current_object().__reduce__() - - if not PY3: - def __cmp__(self, other): - return cmp(self._get_current_object(), other) # noqa - - def __long__(self): - return long(self._get_current_object()) # noqa - - -class PromiseProxy(Proxy): - """This is a proxy to an object that has not yet been evaulated. - - :class:`Proxy` will evaluate the object each time, while the - promise will only evaluate it once. 
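The distinction this docstring draws is observable by counting evaluations: a Proxy re-invokes its factory on every access, while a PromiseProxy memoizes the first result. A minimal sketch, assuming a celery 3.1-era `celery.local` is importable (this diff removes the vendored copy, so an installed celery of the same vintage):

    from celery.local import Proxy, PromiseProxy

    calls = []

    def make_target():
        calls.append(1)           # record each evaluation
        return {'n': len(calls)}

    p = Proxy(make_target)
    _ = p['n']
    _ = p['n']
    assert len(calls) == 2        # evaluated on every access

    del calls[:]
    pp = PromiseProxy(make_target)
    _ = pp['n']
    _ = pp['n']
    assert len(calls) == 1        # evaluated once, then cached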
- - """ - - __slots__ = ('__pending__', ) - - def _get_current_object(self): - try: - return object.__getattribute__(self, '__thing') - except AttributeError: - return self.__evaluate__() - - def __then__(self, fun, *args, **kwargs): - if self.__evaluated__(): - return fun(*args, **kwargs) - from collections import deque - try: - pending = object.__getattribute__(self, '__pending__') - except AttributeError: - pending = None - if pending is None: - pending = deque() - object.__setattr__(self, '__pending__', pending) - pending.append((fun, args, kwargs)) - - def __evaluated__(self): - try: - object.__getattribute__(self, '__thing') - except AttributeError: - return False - return True - - def __maybe_evaluate__(self): - return self._get_current_object() - - def __evaluate__(self, - _clean=('_Proxy__local', - '_Proxy__args', - '_Proxy__kwargs')): - try: - thing = Proxy._get_current_object(self) - except: - raise - else: - object.__setattr__(self, '__thing', thing) - for attr in _clean: - try: - object.__delattr__(self, attr) - except AttributeError: # pragma: no cover - # May mask errors so ignore - pass - try: - pending = object.__getattribute__(self, '__pending__') - except AttributeError: - pass - else: - try: - while pending: - fun, args, kwargs = pending.popleft() - fun(*args, **kwargs) - finally: - try: - object.__delattr__(self, '__pending__') - except AttributeError: - pass - return thing - - -def maybe_evaluate(obj): - try: - return obj.__maybe_evaluate__() - except AttributeError: - return obj diff --git a/thesisenv/lib/python3.6/site-packages/celery/platforms.py b/thesisenv/lib/python3.6/site-packages/celery/platforms.py deleted file mode 100644 index b0242d5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/platforms.py +++ /dev/null @@ -1,813 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.platforms - ~~~~~~~~~~~~~~~~ - - Utilities dealing with platform specifics: signals, daemonization, - users, groups, and so on. 
- -""" -from __future__ import absolute_import, print_function - -import atexit -import errno -import math -import numbers -import os -import platform as _platform -import signal as _signal -import sys -import warnings - -from collections import namedtuple - -from billiard import current_process -# fileno used to be in this module -from kombu.utils import maybe_fileno -from kombu.utils.compat import get_errno -from kombu.utils.encoding import safe_str -from contextlib import contextmanager - -from .local import try_import -from .five import items, range, reraise, string_t, zip_longest -from .utils.functional import uniq - -_setproctitle = try_import('setproctitle') -resource = try_import('resource') -pwd = try_import('pwd') -grp = try_import('grp') -mputil = try_import('multiprocessing.util') - -__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', - 'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed', - 'get_fdmax', 'Pidfile', 'create_pidlock', - 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', - 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', - 'maybe_drop_privileges', 'signals', 'set_process_title', - 'set_mp_process_title', 'get_errno_name', 'ignore_errno', - 'fd_by_path'] - -# exitcodes -EX_OK = getattr(os, 'EX_OK', 0) -EX_FAILURE = 1 -EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) -EX_USAGE = getattr(os, 'EX_USAGE', 64) -EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) - -SYSTEM = _platform.system() -IS_OSX = SYSTEM == 'Darwin' -IS_WINDOWS = SYSTEM == 'Windows' - -DAEMON_WORKDIR = '/' - -PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY -PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK)) - -PIDLOCKED = """ERROR: Pidfile ({0}) already exists. -Seems we're already running? (pid: {1})""" - -_range = namedtuple('_range', ('start', 'stop')) - -C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False) - -ROOT_DISALLOWED = """\ -Running a worker with superuser privileges when the -worker accepts messages serialized with pickle is a very bad idea! - -If you really want to continue then you have to set the C_FORCE_ROOT -environment variable (but please think about this before you do). - -User information: uid={uid} euid={euid} gid={gid} egid={egid} -""" - -ROOT_DISCOURAGED = """\ -You are running the worker with superuser privileges, which is -absolutely not recommended! - -Please specify a different user using the -u option. - -User information: uid={uid} euid={euid} gid={gid} egid={egid} -""" - - -def pyimplementation(): - """Return string identifying the current Python implementation.""" - if hasattr(_platform, 'python_implementation'): - return _platform.python_implementation() - elif sys.platform.startswith('java'): - return 'Jython ' + sys.platform - elif hasattr(sys, 'pypy_version_info'): - v = '.'.join(str(p) for p in sys.pypy_version_info[:3]) - if sys.pypy_version_info[3:]: - v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:]) - return 'PyPy ' + v - else: - return 'CPython' - - -class LockFailed(Exception): - """Raised if a pidlock can't be acquired.""" - - -def get_fdmax(default=None): - """Return the maximum number of open file descriptors - on this system. - - :keyword default: Value returned if there's no file - descriptor limit. 
- - """ - try: - return os.sysconf('SC_OPEN_MAX') - except: - pass - if resource is None: # Windows - return default - fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if fdmax == resource.RLIM_INFINITY: - return default - return fdmax - - -class Pidfile(object): - """Pidfile - - This is the type returned by :func:`create_pidlock`. - - TIP: Use the :func:`create_pidlock` function instead, - which is more convenient and also removes stale pidfiles (when - the process holding the lock is no longer running). - - """ - - #: Path to the pid lock file. - path = None - - def __init__(self, path): - self.path = os.path.abspath(path) - - def acquire(self): - """Acquire lock.""" - try: - self.write_pid() - except OSError as exc: - reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) - return self - __enter__ = acquire - - def is_locked(self): - """Return true if the pid lock exists.""" - return os.path.exists(self.path) - - def release(self, *args): - """Release lock.""" - self.remove() - __exit__ = release - - def read_pid(self): - """Read and return the current pid.""" - with ignore_errno('ENOENT'): - with open(self.path, 'r') as fh: - line = fh.readline() - if line.strip() == line: # must contain '\n' - raise ValueError( - 'Partial or invalid pidfile {0.path}'.format(self)) - - try: - return int(line.strip()) - except ValueError: - raise ValueError( - 'pidfile {0.path} contents invalid.'.format(self)) - - def remove(self): - """Remove the lock.""" - with ignore_errno(errno.ENOENT, errno.EACCES): - os.unlink(self.path) - - def remove_if_stale(self): - """Remove the lock if the process is not running. - (does not respond to signals).""" - try: - pid = self.read_pid() - except ValueError as exc: - print('Broken pidfile found. Removing it.', file=sys.stderr) - self.remove() - return True - if not pid: - self.remove() - return True - - try: - os.kill(pid, 0) - except os.error as exc: - if exc.errno == errno.ESRCH: - print('Stale pidfile exists. Removing it.', file=sys.stderr) - self.remove() - return True - return False - - def write_pid(self): - pid = os.getpid() - content = '{0}\n'.format(pid) - - pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) - pidfile = os.fdopen(pidfile_fd, 'w') - try: - pidfile.write(content) - # flush and sync so that the re-read below works. - pidfile.flush() - try: - os.fsync(pidfile_fd) - except AttributeError: # pragma: no cover - pass - finally: - pidfile.close() - - rfh = open(self.path) - try: - if rfh.read() != content: - raise LockFailed( - "Inconsistency: Pidfile content doesn't match at re-read") - finally: - rfh.close() -PIDFile = Pidfile # compat alias - - -def create_pidlock(pidfile): - """Create and verify pidfile. - - If the pidfile already exists the program exits with an error message, - however if the process it refers to is not running anymore, the pidfile - is deleted and the program continues. - - This function will automatically install an :mod:`atexit` handler - to release the lock at exit, you can skip this by calling - :func:`_create_pidlock` instead. - - :returns: :class:`Pidfile`. - - **Example**: - - .. 
code-block:: python - - pidlock = create_pidlock('/var/run/app.pid') - - """ - pidlock = _create_pidlock(pidfile) - atexit.register(pidlock.release) - return pidlock - - -def _create_pidlock(pidfile): - pidlock = Pidfile(pidfile) - if pidlock.is_locked() and not pidlock.remove_if_stale(): - print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) - raise SystemExit(EX_CANTCREAT) - pidlock.acquire() - return pidlock - - -def fd_by_path(paths): - """Return a list of fds. - - This method returns list of fds corresponding to - file paths passed in paths variable. - - :keyword paths: List of file paths go get fd for. - - :returns: :list:. - - **Example**: - - .. code-block:: python - - keep = fd_by_path(['/dev/urandom', - '/my/precious/']) - """ - stats = set() - for path in paths: - try: - fd = os.open(path, os.O_RDONLY) - except OSError: - continue - try: - stats.add(os.fstat(fd)[1:3]) - finally: - os.close(fd) - - def fd_in_stats(fd): - try: - return os.fstat(fd)[1:3] in stats - except OSError: - return False - - return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)] - - -if hasattr(os, 'closerange'): - - def close_open_fds(keep=None): - # must make sure this is 0-inclusive (Issue #1882) - keep = list(uniq(sorted( - f for f in map(maybe_fileno, keep or []) if f is not None - ))) - maxfd = get_fdmax(default=2048) - kL, kH = iter([-1] + keep), iter(keep + [maxfd]) - for low, high in zip_longest(kL, kH): - if low + 1 != high: - os.closerange(low + 1, high) - -else: - - def close_open_fds(keep=None): # noqa - keep = [maybe_fileno(f) - for f in (keep or []) if maybe_fileno(f) is not None] - for fd in reversed(range(get_fdmax(default=2048))): - if fd not in keep: - with ignore_errno(errno.EBADF): - os.close(fd) - - -class DaemonContext(object): - _is_open = False - - def __init__(self, pidfile=None, workdir=None, umask=None, - fake=False, after_chdir=None, after_forkers=True, - **kwargs): - if isinstance(umask, string_t): - # octal or decimal, depending on initial zero. - umask = int(umask, 8 if umask.startswith('0') else 10) - self.workdir = workdir or DAEMON_WORKDIR - self.umask = umask - self.fake = fake - self.after_chdir = after_chdir - self.after_forkers = after_forkers - self.stdfds = (sys.stdin, sys.stdout, sys.stderr) - - def redirect_to_null(self, fd): - if fd is not None: - dest = os.open(os.devnull, os.O_RDWR) - os.dup2(dest, fd) - - def open(self): - if not self._is_open: - if not self.fake: - self._detach() - - os.chdir(self.workdir) - if self.umask is not None: - os.umask(self.umask) - - if self.after_chdir: - self.after_chdir() - - if not self.fake: - # We need to keep /dev/urandom from closing because - # shelve needs it, and Beat needs shelve to start. - keep = list(self.stdfds) + fd_by_path(['/dev/urandom']) - close_open_fds(keep) - for fd in self.stdfds: - self.redirect_to_null(maybe_fileno(fd)) - if self.after_forkers and mputil is not None: - mputil._run_after_forkers() - - self._is_open = True - __enter__ = open - - def close(self, *args): - if self._is_open: - self._is_open = False - __exit__ = close - - def _detach(self): - if os.fork() == 0: # first child - os.setsid() # create new session - if os.fork() > 0: # second child - os._exit(0) - else: - os._exit(0) - return self - - -def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, - workdir=None, fake=False, **opts): - """Detach the current process in the background (daemonize). - - :keyword logfile: Optional log file. 
The ability to write to this file - will be verified before the process is detached. - :keyword pidfile: Optional pidfile. The pidfile will not be created, - as this is the responsibility of the child. But the process will - exit if the pid lock exists and the pid written is still running. - :keyword uid: Optional user id or user name to change - effective privileges to. - :keyword gid: Optional group id or group name to change effective - privileges to. - :keyword umask: Optional umask that will be effective in the child process. - :keyword workdir: Optional new working directory. - :keyword fake: Don't actually detach, intented for debugging purposes. - :keyword \*\*opts: Ignored. - - **Example**: - - .. code-block:: python - - from celery.platforms import detached, create_pidlock - - with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid', - uid='nobody'): - # Now in detached child process with effective user set to nobody, - # and we know that our logfile can be written to, and that - # the pidfile is not locked. - pidlock = create_pidlock('/var/run/app.pid') - - # Run the program - program.run(logfile='/var/log/app.log') - - """ - - if not resource: - raise RuntimeError('This platform does not support detach.') - workdir = os.getcwd() if workdir is None else workdir - - signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler. - maybe_drop_privileges(uid=uid, gid=gid) - - def after_chdir_do(): - # Since without stderr any errors will be silently suppressed, - # we need to know that we have access to the logfile. - logfile and open(logfile, 'a').close() - # Doesn't actually create the pidfile, but makes sure it's not stale. - if pidfile: - _create_pidlock(pidfile).release() - - return DaemonContext( - umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do, - ) - - -def parse_uid(uid): - """Parse user id. - - uid can be an integer (uid) or a string (user name), if a user name - the uid is taken from the system user registry. - - """ - try: - return int(uid) - except ValueError: - try: - return pwd.getpwnam(uid).pw_uid - except (AttributeError, KeyError): - raise KeyError('User does not exist: {0}'.format(uid)) - - -def parse_gid(gid): - """Parse group id. - - gid can be an integer (gid) or a string (group name), if a group name - the gid is taken from the system group registry. - - """ - try: - return int(gid) - except ValueError: - try: - return grp.getgrnam(gid).gr_gid - except (AttributeError, KeyError): - raise KeyError('Group does not exist: {0}'.format(gid)) - - -def _setgroups_hack(groups): - """:fun:`setgroups` may have a platform-dependent limit, - and it is not always possible to know in advance what this limit - is, so we use this ugly hack stolen from glibc.""" - groups = groups[:] - - while 1: - try: - return os.setgroups(groups) - except ValueError: # error from Python's check. - if len(groups) <= 1: - raise - groups[:] = groups[:-1] - except OSError as exc: # error from the OS. - if exc.errno != errno.EINVAL or len(groups) <= 1: - raise - groups[:] = groups[:-1] - - -def setgroups(groups): - """Set active groups from a list of group ids.""" - max_groups = None - try: - max_groups = os.sysconf('SC_NGROUPS_MAX') - except Exception: - pass - try: - return _setgroups_hack(groups[:max_groups]) - except OSError as exc: - if exc.errno != errno.EPERM: - raise - if any(group not in groups for group in os.getgroups()): - # we shouldn't be allowed to change to this group. 
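parse_uid()/parse_gid() above accept either a numeric id or a name, falling back to the system registries (pwd/grp); unknown names surface as KeyError. A quick illustration (the account names are examples that exist on most Unix systems, not project settings):

    from celery.platforms import parse_uid, parse_gid

    parse_uid('0')         # -> 0; numeric strings pass through int()
    parse_uid('nobody')    # -> uid from pwd.getpwnam('nobody')
    parse_gid('nogroup')   # -> gid from grp.getgrnam('nogroup')
    # parse_uid('missing') raises KeyError('User does not exist: missing')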
- raise - - -def initgroups(uid, gid): - """Compat version of :func:`os.initgroups` which was first - added to Python 2.7.""" - if not pwd: # pragma: no cover - return - username = pwd.getpwuid(uid)[0] - if hasattr(os, 'initgroups'): # Python 2.7+ - return os.initgroups(username, gid) - groups = [gr.gr_gid for gr in grp.getgrall() - if username in gr.gr_mem] - setgroups(groups) - - -def setgid(gid): - """Version of :func:`os.setgid` supporting group names.""" - os.setgid(parse_gid(gid)) - - -def setuid(uid): - """Version of :func:`os.setuid` supporting usernames.""" - os.setuid(parse_uid(uid)) - - -def maybe_drop_privileges(uid=None, gid=None): - """Change process privileges to new user/group. - - If UID and GID is specified, the real user/group is changed. - - If only UID is specified, the real user is changed, and the group is - changed to the users primary group. - - If only GID is specified, only the group is changed. - - """ - if sys.platform == 'win32': - return - if os.geteuid(): - # no point trying to setuid unless we're root. - if not os.getuid(): - raise AssertionError('contact support') - uid = uid and parse_uid(uid) - gid = gid and parse_gid(gid) - - if uid: - # If GID isn't defined, get the primary GID of the user. - if not gid and pwd: - gid = pwd.getpwuid(uid).pw_gid - # Must set the GID before initgroups(), as setgid() - # is known to zap the group list on some platforms. - - # setgid must happen before setuid (otherwise the setgid operation - # may fail because of insufficient privileges and possibly stay - # in a privileged group). - setgid(gid) - initgroups(uid, gid) - - # at last: - setuid(uid) - # ... and make sure privileges cannot be restored: - try: - setuid(0) - except OSError as exc: - if get_errno(exc) != errno.EPERM: - raise - pass # Good: cannot restore privileges. - else: - raise RuntimeError( - 'non-root user able to restore privileges after setuid.') - else: - gid and setgid(gid) - - if uid and (not os.getuid()) and not (os.geteuid()): - raise AssertionError('Still root uid after drop privileges!') - if gid and (not os.getgid()) and not (os.getegid()): - raise AssertionError('Still root gid after drop privileges!') - - -class Signals(object): - """Convenience interface to :mod:`signals`. - - If the requested signal is not supported on the current platform, - the operation will be ignored. - - **Examples**: - - .. code-block:: python - - >>> from celery.platforms import signals - - >>> from proj.handlers import my_handler - >>> signals['INT'] = my_handler - - >>> signals['INT'] - my_handler - - >>> signals.supported('INT') - True - - >>> signals.signum('INT') - 2 - - >>> signals.ignore('USR1') - >>> signals['USR1'] == signals.ignored - True - - >>> signals.reset('USR1') - >>> signals['USR1'] == signals.default - True - - >>> from proj.handlers import exit_handler, hup_handler - >>> signals.update(INT=exit_handler, - ... TERM=exit_handler, - ... 
HUP=hup_handler) - - """ - - ignored = _signal.SIG_IGN - default = _signal.SIG_DFL - - if hasattr(_signal, 'setitimer'): - - def arm_alarm(self, seconds): - _signal.setitimer(_signal.ITIMER_REAL, seconds) - else: # pragma: no cover - try: - from itimer import alarm as _itimer_alarm # noqa - except ImportError: - - def arm_alarm(self, seconds): # noqa - _signal.alarm(math.ceil(seconds)) - else: # pragma: no cover - - def arm_alarm(self, seconds): # noqa - return _itimer_alarm(seconds) # noqa - - def reset_alarm(self): - return _signal.alarm(0) - - def supported(self, signal_name): - """Return true value if ``signal_name`` exists on this platform.""" - try: - return self.signum(signal_name) - except AttributeError: - pass - - def signum(self, signal_name): - """Get signal number from signal name.""" - if isinstance(signal_name, numbers.Integral): - return signal_name - if not isinstance(signal_name, string_t) \ - or not signal_name.isupper(): - raise TypeError('signal name must be uppercase string.') - if not signal_name.startswith('SIG'): - signal_name = 'SIG' + signal_name - return getattr(_signal, signal_name) - - def reset(self, *signal_names): - """Reset signals to the default signal handler. - - Does nothing if the platform doesn't support signals, - or the specified signal in particular. - - """ - self.update((sig, self.default) for sig in signal_names) - - def ignore(self, *signal_names): - """Ignore signal using :const:`SIG_IGN`. - - Does nothing if the platform doesn't support signals, - or the specified signal in particular. - - """ - self.update((sig, self.ignored) for sig in signal_names) - - def __getitem__(self, signal_name): - return _signal.getsignal(self.signum(signal_name)) - - def __setitem__(self, signal_name, handler): - """Install signal handler. - - Does nothing if the current platform doesn't support signals, - or the specified signal in particular. - - """ - try: - _signal.signal(self.signum(signal_name), handler) - except (AttributeError, ValueError): - pass - - def update(self, _d_=None, **sigmap): - """Set signal handlers from a mapping.""" - for signal_name, handler in items(dict(_d_ or {}, **sigmap)): - self[signal_name] = handler - -signals = Signals() -get_signal = signals.signum # compat -install_signal_handler = signals.__setitem__ # compat -reset_signal = signals.reset # compat -ignore_signal = signals.ignore # compat - - -def strargv(argv): - arg_start = 2 if 'manage' in argv[0] else 1 - if len(argv) > arg_start: - return ' '.join(argv[arg_start:]) - return '' - - -def set_process_title(progname, info=None): - """Set the ps name for the currently running process. - - Only works if :mod:`setproctitle` is installed. - - """ - proctitle = '[{0}]'.format(progname) - proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle - if _setproctitle: - _setproctitle.setproctitle(safe_str(proctitle)) - return proctitle - - -if os.environ.get('NOSETPS'): # pragma: no cover - - def set_mp_process_title(*a, **k): - pass -else: - - def set_mp_process_title(progname, info=None, hostname=None): # noqa - """Set the ps name using the multiprocessing process name. - - Only works if :mod:`setproctitle` is installed. - - """ - if hostname: - progname = '{0}: {1}'.format(progname, hostname) - return set_process_title( - '{0}:{1}'.format(progname, current_process().name), info=info) - - -def get_errno_name(n): - """Get errno for string, e.g. 
``ENOENT``.""" - if isinstance(n, string_t): - return getattr(errno, n) - return n - - -@contextmanager -def ignore_errno(*errnos, **kwargs): - """Context manager to ignore specific POSIX error codes. - - Takes a list of error codes to ignore, which can be either - the name of the code, or the code integer itself:: - - >>> with ignore_errno('ENOENT'): - ... with open('foo', 'r') as fh: - ... return fh.read() - - >>> with ignore_errno(errno.ENOENT, errno.EPERM): - ... pass - - :keyword types: A tuple of exceptions to ignore (when the errno matches), - defaults to :exc:`Exception`. - """ - types = kwargs.get('types') or (Exception, ) - errnos = [get_errno_name(errno) for errno in errnos] - try: - yield - except types as exc: - if not hasattr(exc, 'errno'): - raise - if exc.errno not in errnos: - raise - - -def check_privileges(accept_content): - uid = os.getuid() if hasattr(os, 'getuid') else 65535 - gid = os.getgid() if hasattr(os, 'getgid') else 65535 - euid = os.geteuid() if hasattr(os, 'geteuid') else 65535 - egid = os.getegid() if hasattr(os, 'getegid') else 65535 - - if hasattr(os, 'fchown'): - if not all(hasattr(os, attr) - for attr in ['getuid', 'getgid', 'geteuid', 'getegid']): - raise AssertionError('suspicious platform, contact support') - - if not uid or not gid or not euid or not egid: - if ('pickle' in accept_content or - 'application/x-python-serialize' in accept_content): - if not C_FORCE_ROOT: - try: - print(ROOT_DISALLOWED.format( - uid=uid, euid=euid, gid=gid, egid=egid, - ), file=sys.stderr) - finally: - os._exit(1) - warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format( - uid=uid, euid=euid, gid=gid, egid=egid, - ))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/result.py b/thesisenv/lib/python3.6/site-packages/celery/result.py deleted file mode 100644 index bf49d72..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/result.py +++ /dev/null @@ -1,925 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.result - ~~~~~~~~~~~~~ - - Task results/state and groups of results. - -""" -from __future__ import absolute_import - -import time -import warnings - -from collections import deque -from contextlib import contextmanager -from copy import copy - -from kombu.utils import cached_property -from kombu.utils.compat import OrderedDict - -from . import current_app -from . import states -from ._state import _set_task_join_will_block, task_join_will_block -from .app import app_or_default -from .datastructures import DependencyGraph, GraphFormatter -from .exceptions import IncompleteStream, TimeoutError -from .five import items, range, string_t, monotonic -from .utils import deprecated - -__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', - 'EagerResult', 'result_from_tuple'] - -E_WOULDBLOCK = """\ -Never call result.get() within a task! -See http://docs.celeryq.org/en/latest/userguide/tasks.html\ -#task-synchronous-subtasks - -In Celery 3.2 this will result in an exception being -raised instead of just being a warning. -""" - - -def assert_will_not_block(): - if task_join_will_block(): - warnings.warn(RuntimeWarning(E_WOULDBLOCK)) - - -@contextmanager -def allow_join_result(): - reset_value = task_join_will_block() - _set_task_join_will_block(False) - try: - yield - finally: - _set_task_join_will_block(reset_value) - - -class ResultBase(object): - """Base class for all results""" - - #: Parent result (if part of a chain) - parent = None - - -class AsyncResult(ResultBase): - """Query task state. - - :param id: see :attr:`id`. 
- :keyword backend: see :attr:`backend`. - - """ - app = None - - #: Error raised for timeouts. - TimeoutError = TimeoutError - - #: The task's UUID. - id = None - - #: The task result backend to use. - backend = None - - def __init__(self, id, backend=None, task_name=None, - app=None, parent=None): - self.app = app_or_default(app or self.app) - self.id = id - self.backend = backend or self.app.backend - self.task_name = task_name - self.parent = parent - self._cache = None - - def as_tuple(self): - parent = self.parent - return (self.id, parent and parent.as_tuple()), None - serializable = as_tuple # XXX compat - - def forget(self): - """Forget about (and possibly remove the result of) this task.""" - self._cache = None - self.backend.forget(self.id) - - def revoke(self, connection=None, terminate=False, signal=None, - wait=False, timeout=None): - """Send revoke signal to all workers. - - Any worker receiving the task, or having reserved the - task, *must* ignore it. - - :keyword terminate: Also terminate the process currently working - on the task (if any). - :keyword signal: Name of signal to send to process if terminate. - Default is TERM. - :keyword wait: Wait for replies from workers. Will wait for 1 second - by default or you can specify a custom ``timeout``. - :keyword timeout: Time in seconds to wait for replies if ``wait`` - enabled. - - """ - self.app.control.revoke(self.id, connection=connection, - terminate=terminate, signal=signal, - reply=wait, timeout=timeout) - - def get(self, timeout=None, propagate=True, interval=0.5, - no_ack=True, follow_parents=True, - EXCEPTION_STATES=states.EXCEPTION_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES): - """Wait until task is ready, and return its result. - - .. warning:: - - Waiting for tasks within a task may lead to deadlocks. - Please read :ref:`task-synchronous-subtasks`. - - :keyword timeout: How long to wait, in seconds, before the - operation times out. - :keyword propagate: Re-raise exception if the task failed. - :keyword interval: Time to wait (in seconds) before retrying to - retrieve the result. Note that this does not have any effect - when using the amqp result store backend, as it does not - use polling. - :keyword no_ack: Enable amqp no ack (automatically acknowledge - message). If this is :const:`False` then the message will - **not be acked**. - :keyword follow_parents: Reraise any exception raised by parent task. - - :raises celery.exceptions.TimeoutError: if `timeout` is not - :const:`None` and the result does not arrive within `timeout` - seconds. - - If the remote call raised an exception then that exception will - be re-raised. - - """ - assert_will_not_block() - on_interval = None - if follow_parents and propagate and self.parent: - on_interval = self._maybe_reraise_parent_error - on_interval() - - if self._cache: - if propagate: - self.maybe_reraise() - return self.result - - meta = self.backend.wait_for( - self.id, timeout=timeout, - interval=interval, - on_interval=on_interval, - no_ack=no_ack, - ) - if meta: - self._maybe_set_cache(meta) - status = meta['status'] - if status in PROPAGATE_STATES and propagate: - raise meta['result'] - return meta['result'] - wait = get # deprecated alias to :meth:`get`. 
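In practice the get() contract above is exercised as follows: block with a timeout, let ``propagate`` re-raise task-side exceptions, and treat TimeoutError as "not ready yet". A minimal sketch against a hypothetical app and task (the names ``proj.celery.app`` and ``proj.tasks.add`` are placeholders):

    from celery.exceptions import TimeoutError
    from proj.celery import app   # hypothetical Celery app module

    result = app.send_task('proj.tasks.add', args=(2, 2))
    try:
        value = result.get(timeout=10)   # waits; re-raises task exceptions
    except TimeoutError:
        value = None                     # no result within 10 seconds

Because get() may deadlock when called from inside another task, the assert_will_not_block() guard above turns that case into a RuntimeWarning.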
- - def _maybe_reraise_parent_error(self): - for node in reversed(list(self._parents())): - node.maybe_reraise() - - def _parents(self): - node = self.parent - while node: - yield node - node = node.parent - - def collect(self, intermediate=False, **kwargs): - """Iterator, like :meth:`get` will wait for the task to complete, - but will also follow :class:`AsyncResult` and :class:`ResultSet` - returned by the task, yielding ``(result, value)`` tuples for each - result in the tree. - - An example would be having the following tasks: - - .. code-block:: python - - from celery import group - from proj.celery import app - - @app.task(trail=True) - def A(how_many): - return group(B.s(i) for i in range(how_many))() - - @app.task(trail=True) - def B(i): - return pow2.delay(i) - - @app.task(trail=True) - def pow2(i): - return i ** 2 - - Note that the ``trail`` option must be enabled - so that the list of children is stored in ``result.children``. - This is the default but enabled explicitly for illustration. - - Calling :meth:`collect` would return: - - .. code-block:: python - - >>> from celery.result import ResultBase - >>> from proj.tasks import A - - >>> result = A.delay(10) - >>> [v for v in result.collect() - ... if not isinstance(v, (ResultBase, tuple))] - [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] - - """ - for _, R in self.iterdeps(intermediate=intermediate): - yield R, R.get(**kwargs) - - def get_leaf(self): - value = None - for _, R in self.iterdeps(): - value = R.get() - return value - - def iterdeps(self, intermediate=False): - stack = deque([(None, self)]) - - while stack: - parent, node = stack.popleft() - yield parent, node - if node.ready(): - stack.extend((node, child) for child in node.children or []) - else: - if not intermediate: - raise IncompleteStream() - - def ready(self): - """Returns :const:`True` if the task has been executed. - - If the task is still running, pending, or is waiting - for retry then :const:`False` is returned. 
- - """ - return self.state in self.backend.READY_STATES - - def successful(self): - """Returns :const:`True` if the task executed successfully.""" - return self.state == states.SUCCESS - - def failed(self): - """Returns :const:`True` if the task failed.""" - return self.state == states.FAILURE - - def maybe_reraise(self): - if self.state in states.PROPAGATE_STATES: - raise self.result - - def build_graph(self, intermediate=False, formatter=None): - graph = DependencyGraph( - formatter=formatter or GraphFormatter(root=self.id, shape='oval'), - ) - for parent, node in self.iterdeps(intermediate=intermediate): - graph.add_arc(node) - if parent: - graph.add_edge(parent, node) - return graph - - def __str__(self): - """`str(self) -> self.id`""" - return str(self.id) - - def __hash__(self): - """`hash(self) -> hash(self.id)`""" - return hash(self.id) - - def __repr__(self): - return '<{0}: {1}>'.format(type(self).__name__, self.id) - - def __eq__(self, other): - if isinstance(other, AsyncResult): - return other.id == self.id - elif isinstance(other, string_t): - return other == self.id - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __copy__(self): - return self.__class__( - self.id, self.backend, self.task_name, self.app, self.parent, - ) - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return self.id, self.backend, self.task_name, None, self.parent - - def __del__(self): - self._cache = None - - @cached_property - def graph(self): - return self.build_graph() - - @property - def supports_native_join(self): - return self.backend.supports_native_join - - @property - def children(self): - return self._get_task_meta().get('children') - - def _maybe_set_cache(self, meta): - if meta: - state = meta['status'] - if state == states.SUCCESS or state in states.PROPAGATE_STATES: - return self._set_cache(meta) - return meta - - def _get_task_meta(self): - if self._cache is None: - return self._maybe_set_cache(self.backend.get_task_meta(self.id)) - return self._cache - - def _set_cache(self, d): - children = d.get('children') - if children: - d['children'] = [ - result_from_tuple(child, self.app) for child in children - ] - self._cache = d - return d - - @property - def result(self): - """When the task has been executed, this contains the return value. - If the task raised an exception, this will be the exception - instance.""" - return self._get_task_meta()['result'] - info = result - - @property - def traceback(self): - """Get the traceback of a failed task.""" - return self._get_task_meta().get('traceback') - - @property - def state(self): - """The tasks current state. - - Possible values includes: - - *PENDING* - - The task is waiting for execution. - - *STARTED* - - The task has been started. - - *RETRY* - - The task is to be retried, possibly because of failure. - - *FAILURE* - - The task raised an exception, or has exceeded the retry limit. - The :attr:`result` attribute then contains the - exception raised by the task. - - *SUCCESS* - - The task executed successfully. The :attr:`result` attribute - then contains the tasks return value. - - """ - return self._get_task_meta()['status'] - status = state - - @property - def task_id(self): - """compat alias to :attr:`id`""" - return self.id - - @task_id.setter # noqa - def task_id(self, id): - self.id = id -BaseAsyncResult = AsyncResult # for backwards compatibility. - - -class ResultSet(ResultBase): - """Working with more than one result. 
-
-    :param results: List of result instances.
-
-    """
-    app = None
-
-    #: List of results in the set.
-    results = None
-
-    def __init__(self, results, app=None, **kwargs):
-        self.app = app_or_default(app or self.app)
-        self.results = results
-
-    def add(self, result):
-        """Add :class:`AsyncResult` as a new member of the set.
-
-        Does nothing if the result is already a member.
-
-        """
-        if result not in self.results:
-            self.results.append(result)
-
-    def remove(self, result):
-        """Remove result from the set; it must be a member.
-
-        :raises KeyError: if the result is not a member.
-
-        """
-        if isinstance(result, string_t):
-            result = self.app.AsyncResult(result)
-        try:
-            self.results.remove(result)
-        except ValueError:
-            raise KeyError(result)
-
-    def discard(self, result):
-        """Remove result from the set if it is a member.
-
-        If it is not a member, do nothing.
-
-        """
-        try:
-            self.remove(result)
-        except KeyError:
-            pass
-
-    def update(self, results):
-        """Update set with the union of itself and an iterable with
-        results."""
-        self.results.extend(r for r in results if r not in self.results)
-
-    def clear(self):
-        """Remove all results from this set."""
-        self.results[:] = []  # don't create new list.
-
-    def successful(self):
-        """Were all of the tasks successful?
-
-        :returns: :const:`True` if all of the tasks finished
-            successfully (i.e. did not raise an exception).
-
-        """
-        return all(result.successful() for result in self.results)
-
-    def failed(self):
-        """Did any of the tasks fail?
-
-        :returns: :const:`True` if one of the tasks failed.
-            (i.e., raised an exception)
-
-        """
-        return any(result.failed() for result in self.results)
-
-    def maybe_reraise(self):
-        for result in self.results:
-            result.maybe_reraise()
-
-    def waiting(self):
-        """Are any of the tasks incomplete?
-
-        :returns: :const:`True` if one of the tasks is still
-            waiting for execution.
-
-        """
-        return any(not result.ready() for result in self.results)
-
-    def ready(self):
-        """Did all of the tasks complete? (either by success or failure).
-
-        :returns: :const:`True` if all of the tasks have been
-            executed.
-
-        """
-        return all(result.ready() for result in self.results)
-
-    def completed_count(self):
-        """Task completion count.
-
-        :returns: the number of tasks completed.
-
-        """
-        return sum(int(result.successful()) for result in self.results)
-
-    def forget(self):
-        """Forget about (and possibly remove the result of) all the tasks."""
-        for result in self.results:
-            result.forget()
-
-    def revoke(self, connection=None, terminate=False, signal=None,
-               wait=False, timeout=None):
-        """Send revoke signal to all workers for all tasks in the set.
-
-        :keyword terminate: Also terminate the process currently working
-            on the task (if any).
-        :keyword signal: Name of signal to send to process if terminate.
-            Default is TERM.
-        :keyword wait: Wait for replies from worker.  Will wait for 1 second
-           by default or you can specify a custom ``timeout``.
-        :keyword timeout: Time in seconds to wait for replies if ``wait``
-                          enabled.
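Taken together, the predicates above are plain aggregates over the members: all() for successful()/ready(), any() for failed()/waiting(). A short sketch (``add`` is a stand-in task from a hypothetical ``proj.tasks`` module):

    from celery.result import ResultSet
    from proj.tasks import add   # hypothetical task module

    rs = ResultSet([add.delay(i, i) for i in range(4)])
    rs.waiting()            # True while any member is unfinished
    rs.ready()              # True once every member has run
    rs.successful()         # True only if no member raised
    rs.completed_count()    # number of members that finished successfully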
- - """ - self.app.control.revoke([r.id for r in self.results], - connection=connection, timeout=timeout, - terminate=terminate, signal=signal, reply=wait) - - def __iter__(self): - return iter(self.results) - - def __getitem__(self, index): - """`res[i] -> res.results[i]`""" - return self.results[index] - - @deprecated('3.2', '3.3') - def iterate(self, timeout=None, propagate=True, interval=0.5): - """Deprecated method, use :meth:`get` with a callback argument.""" - elapsed = 0.0 - results = OrderedDict((result.id, copy(result)) - for result in self.results) - - while results: - removed = set() - for task_id, result in items(results): - if result.ready(): - yield result.get(timeout=timeout and timeout - elapsed, - propagate=propagate) - removed.add(task_id) - else: - if result.backend.subpolling_interval: - time.sleep(result.backend.subpolling_interval) - for task_id in removed: - results.pop(task_id, None) - time.sleep(interval) - elapsed += interval - if timeout and elapsed >= timeout: - raise TimeoutError('The operation timed out') - - def get(self, timeout=None, propagate=True, interval=0.5, - callback=None, no_ack=True): - """See :meth:`join` - - This is here for API compatibility with :class:`AsyncResult`, - in addition it uses :meth:`join_native` if available for the - current result backend. - - """ - return (self.join_native if self.supports_native_join else self.join)( - timeout=timeout, propagate=propagate, - interval=interval, callback=callback, no_ack=no_ack) - - def join(self, timeout=None, propagate=True, interval=0.5, - callback=None, no_ack=True): - """Gathers the results of all tasks as a list in order. - - .. note:: - - This can be an expensive operation for result store - backends that must resort to polling (e.g. database). - - You should consider using :meth:`join_native` if your backend - supports it. - - .. warning:: - - Waiting for tasks within a task may lead to deadlocks. - Please see :ref:`task-synchronous-subtasks`. - - :keyword timeout: The number of seconds to wait for results before - the operation times out. - - :keyword propagate: If any of the tasks raises an exception, the - exception will be re-raised. - - :keyword interval: Time to wait (in seconds) before retrying to - retrieve a result from the set. Note that this - does not have any effect when using the amqp - result store backend, as it does not use polling. - - :keyword callback: Optional callback to be called for every result - received. Must have signature ``(task_id, value)`` - No results will be returned by this function if - a callback is specified. The order of results - is also arbitrary when a callback is used. - To get access to the result object for a particular - id you will have to generate an index first: - ``index = {r.id: r for r in gres.results.values()}`` - Or you can create new result objects on the fly: - ``result = app.AsyncResult(task_id)`` (both will - take advantage of the backend cache anyway). - - :keyword no_ack: Automatic message acknowledgement (Note that if this - is set to :const:`False` then the messages *will not be - acknowledged*). - - :raises celery.exceptions.TimeoutError: if ``timeout`` is not - :const:`None` and the operation takes longer than ``timeout`` - seconds. 
- - """ - assert_will_not_block() - time_start = monotonic() - remaining = None - - results = [] - for result in self.results: - remaining = None - if timeout: - remaining = timeout - (monotonic() - time_start) - if remaining <= 0.0: - raise TimeoutError('join operation timed out') - value = result.get( - timeout=remaining, propagate=propagate, - interval=interval, no_ack=no_ack, - ) - if callback: - callback(result.id, value) - else: - results.append(value) - return results - - def iter_native(self, timeout=None, interval=0.5, no_ack=True): - """Backend optimized version of :meth:`iterate`. - - .. versionadded:: 2.2 - - Note that this does not support collecting the results - for different task types using different backends. - - This is currently only supported by the amqp, Redis and cache - result backends. - - """ - results = self.results - if not results: - return iter([]) - return self.backend.get_many( - set(r.id for r in results), - timeout=timeout, interval=interval, no_ack=no_ack, - ) - - def join_native(self, timeout=None, propagate=True, - interval=0.5, callback=None, no_ack=True): - """Backend optimized version of :meth:`join`. - - .. versionadded:: 2.2 - - Note that this does not support collecting the results - for different task types using different backends. - - This is currently only supported by the amqp, Redis and cache - result backends. - - """ - assert_will_not_block() - order_index = None if callback else dict( - (result.id, i) for i, result in enumerate(self.results) - ) - acc = None if callback else [None for _ in range(len(self))] - for task_id, meta in self.iter_native(timeout, interval, no_ack): - value = meta['result'] - if propagate and meta['status'] in states.PROPAGATE_STATES: - raise value - if callback: - callback(task_id, value) - else: - acc[order_index[task_id]] = value - return acc - - def _failed_join_report(self): - return (res for res in self.results - if res.backend.is_cached(res.id) and - res.state in states.PROPAGATE_STATES) - - def __len__(self): - return len(self.results) - - def __eq__(self, other): - if isinstance(other, ResultSet): - return other.results == self.results - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<{0}: [{1}]>'.format(type(self).__name__, - ', '.join(r.id for r in self.results)) - - @property - def subtasks(self): - """Deprecated alias to :attr:`results`.""" - return self.results - - @property - def supports_native_join(self): - try: - return self.results[0].supports_native_join - except IndexError: - pass - - @property - def backend(self): - return self.app.backend if self.app else self.results[0].backend - - -class GroupResult(ResultSet): - """Like :class:`ResultSet`, but with an associated id. - - This type is returned by :class:`~celery.group`, and the - deprecated TaskSet, meth:`~celery.task.TaskSet.apply_async` method. - - It enables inspection of the tasks state and return values as - a single entity. - - :param id: The id of the group. - :param results: List of result instances. - - """ - - #: The UUID of the group. - id = None - - #: List/iterator of results in the group - results = None - - def __init__(self, id=None, results=None, **kwargs): - self.id = id - ResultSet.__init__(self, results, **kwargs) - - def save(self, backend=None): - """Save group-result for later retrieval using :meth:`restore`. - - Example:: - - >>> def save_and_restore(result): - ... result.save() - ... 
result = GroupResult.restore(result.id) - - """ - return (backend or self.app.backend).save_group(self.id, self) - - def delete(self, backend=None): - """Remove this result if it was previously saved.""" - (backend or self.app.backend).delete_group(self.id) - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return self.id, self.results - - def __bool__(self): - return bool(self.id or self.results) - __nonzero__ = __bool__ # Included for Py2 backwards compatibility - - def __eq__(self, other): - if isinstance(other, GroupResult): - return other.id == self.id and other.results == self.results - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id, - ', '.join(r.id for r in self.results)) - - def as_tuple(self): - return self.id, [r.as_tuple() for r in self.results] - serializable = as_tuple # XXX compat - - @property - def children(self): - return self.results - - @classmethod - def restore(self, id, backend=None): - """Restore previously saved group result.""" - return ( - backend or (self.app.backend if self.app else current_app.backend) - ).restore_group(id) - - -class TaskSetResult(GroupResult): - """Deprecated version of :class:`GroupResult`""" - - def __init__(self, taskset_id, results=None, **kwargs): - # XXX supports the taskset_id kwarg. - # XXX previously the "results" arg was named "subtasks". - if 'subtasks' in kwargs: - results = kwargs['subtasks'] - GroupResult.__init__(self, taskset_id, results, **kwargs) - - def itersubtasks(self): - """Deprecated. Use ``iter(self.results)`` instead.""" - return iter(self.results) - - @property - def total(self): - """Deprecated: Use ``len(r)``.""" - return len(self) - - @property - def taskset_id(self): - """compat alias to :attr:`self.id`""" - return self.id - - @taskset_id.setter # noqa - def taskset_id(self, id): - self.id = id - - -class EagerResult(AsyncResult): - """Result that we know has already been executed.""" - task_name = None - - def __init__(self, id, ret_value, state, traceback=None): - self.id = id - self._result = ret_value - self._state = state - self._traceback = traceback - - def _get_task_meta(self): - return {'task_id': self.id, 'result': self._result, 'status': - self._state, 'traceback': self._traceback} - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return (self.id, self._result, self._state, self._traceback) - - def __copy__(self): - cls, args = self.__reduce__() - return cls(*args) - - def ready(self): - return True - - def get(self, timeout=None, propagate=True, **kwargs): - if self.successful(): - return self.result - elif self.state in states.PROPAGATE_STATES: - if propagate: - raise self.result - return self.result - wait = get - - def forget(self): - pass - - def revoke(self, *args, **kwargs): - self._state = states.REVOKED - - def __repr__(self): - return ''.format(self) - - @property - def result(self): - """The tasks return value""" - return self._result - - @property - def state(self): - """The tasks state.""" - return self._state - status = state - - @property - def traceback(self): - """The traceback if the task failed.""" - return self._traceback - - @property - def supports_native_join(self): - return False - - -def result_from_tuple(r, app=None): - # earlier backends may just pickle, so check if - # result is already prepared. 
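result_from_tuple() below is the inverse of AsyncResult.as_tuple()/GroupResult.as_tuple(), which flatten a result handle into nested tuples of ids that are safe to serialize (for example into a session). A round-trip sketch (``add`` is again a placeholder task):

    from celery.result import result_from_tuple
    from proj.tasks import add    # hypothetical task

    res = add.delay(2, 2)
    frozen = res.as_tuple()            # plain tuples/ids, JSON-safe
    same = result_from_tuple(frozen)   # rebuilt AsyncResult handle
    assert same.id == res.id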
- app = app_or_default(app) - Result = app.AsyncResult - if not isinstance(r, ResultBase): - res, nodes = r - if nodes: - return app.GroupResult( - res, [result_from_tuple(child, app) for child in nodes], - ) - # previously did not include parent - id, parent = res if isinstance(res, (list, tuple)) else (res, None) - if parent: - parent = result_from_tuple(parent, app) - return Result(id, parent=parent) - return r -from_serializable = result_from_tuple # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/schedules.py b/thesisenv/lib/python3.6/site-packages/celery/schedules.py deleted file mode 100644 index 6424dfa..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/schedules.py +++ /dev/null @@ -1,593 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.schedules - ~~~~~~~~~~~~~~~~ - - Schedules define the intervals at which periodic tasks - should run. - -""" -from __future__ import absolute_import - -import numbers -import re - -from collections import namedtuple -from datetime import datetime, timedelta - -from kombu.utils import cached_property - -from . import current_app -from .five import range, string_t -from .utils import is_iterable -from .utils.timeutils import ( - timedelta_seconds, weekday, maybe_timedelta, remaining, - humanize_seconds, timezone, maybe_make_aware, ffwd -) -from .datastructures import AttributeDict - -__all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser', - 'maybe_schedule'] - -schedstate = namedtuple('schedstate', ('is_due', 'next')) - - -CRON_PATTERN_INVALID = """\ -Invalid crontab pattern. Valid range is {min}-{max}. \ -'{value}' was found.\ -""" - -CRON_INVALID_TYPE = """\ -Argument cronspec needs to be of any of the following types: \ -int, str, or an iterable type. {type!r} was given.\ -""" - -CRON_REPR = """\ -\ -""" - - -def cronfield(s): - return '*' if s is None else s - - -class ParseException(Exception): - """Raised by crontab_parser when the input can't be parsed.""" - - -class schedule(object): - """Schedule for periodic task. - - :param run_every: Interval in seconds (or a :class:`~datetime.timedelta`). - :param relative: If set to True the run time will be rounded to the - resolution of the interval. - :param nowfun: Function returning the current date and time - (class:`~datetime.datetime`). - :param app: Celery app instance. - - """ - relative = False - - def __init__(self, run_every=None, relative=False, nowfun=None, app=None): - self.run_every = maybe_timedelta(run_every) - self.relative = relative - self.nowfun = nowfun - self._app = app - - def now(self): - return (self.nowfun or self.app.now)() - - def remaining_estimate(self, last_run_at): - return remaining( - self.maybe_make_aware(last_run_at), self.run_every, - self.maybe_make_aware(self.now()), self.relative, - ) - - def is_due(self, last_run_at): - """Returns tuple of two items `(is_due, next_time_to_check)`, - where next time to check is in seconds. - - e.g. - - * `(True, 20)`, means the task should be run now, and the next - time to check is in 20 seconds. - - * `(False, 12.3)`, means the task is not due, but that the scheduler - should check again in 12.3 seconds. - - The next time to check is used to save energy/cpu cycles, - it does not need to be accurate but will influence the precision - of your schedule. You must also keep in mind - the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, - which decides the maximum number of seconds the scheduler can - sleep between re-checking the periodic task intervals. 
-        So if you have a task that changes schedule at runtime, then your
-        next_run_at check will decide how long it will take before a change
-        to the schedule takes effect.  The max loop interval takes precedence
-        over the next check-at value returned.
-
-        .. admonition:: Scheduler max interval variance
-
-            The default max loop interval may vary for different schedulers.
-            For the default scheduler the value is 5 minutes, but for e.g.
-            the django-celery database scheduler the value is 5 seconds.
-
-        """
-        last_run_at = self.maybe_make_aware(last_run_at)
-        rem_delta = self.remaining_estimate(last_run_at)
-        remaining_s = timedelta_seconds(rem_delta)
-        if remaining_s == 0:
-            return schedstate(is_due=True, next=self.seconds)
-        return schedstate(is_due=False, next=remaining_s)
-
-    def maybe_make_aware(self, dt):
-        if self.utc_enabled:
-            return maybe_make_aware(dt, self.tz)
-        return dt
-
-    def __repr__(self):
-        return '<freq: {0.human_seconds}>'.format(self)
-
-    def __eq__(self, other):
-        if isinstance(other, schedule):
-            return self.run_every == other.run_every
-        return self.run_every == other
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __reduce__(self):
-        return self.__class__, (self.run_every, self.relative, self.nowfun)
-
-    @property
-    def seconds(self):
-        return timedelta_seconds(self.run_every)
-
-    @property
-    def human_seconds(self):
-        return humanize_seconds(self.seconds)
-
-    @property
-    def app(self):
-        return self._app or current_app._get_current_object()
-
-    @app.setter  # noqa
-    def app(self, app):
-        self._app = app
-
-    @cached_property
-    def tz(self):
-        return self.app.timezone
-
-    @cached_property
-    def utc_enabled(self):
-        return self.app.conf.CELERY_ENABLE_UTC
-
-    def to_local(self, dt):
-        if not self.utc_enabled:
-            return timezone.to_local_fallback(dt)
-        return dt
-
-
-class crontab_parser(object):
-    """Parser for crontab expressions.  Any expression of the form 'groups'
-    (see BNF grammar below) is accepted and expanded to a set of numbers.
-    These numbers represent the units of time that the crontab needs to
-    run on::
-
-        digit   :: '0'..'9'
-        dow     :: 'a'..'z'
-        number  :: digit+ | dow+
-        steps   :: number
-        range   :: number ( '-' number ) ?
-        numspec :: '*' | range
-        expr    :: numspec ( '/' steps ) ?
-        groups  :: expr ( ',' expr ) *
-
-    The parser is a general purpose one, useful for parsing hours, minutes
-    and day_of_week expressions.  Example usage::
-
-        >>> minutes = crontab_parser(60).parse('*/15')
-        [0, 15, 30, 45]
-        >>> hours = crontab_parser(24).parse('*/4')
-        [0, 4, 8, 12, 16, 20]
-        >>> day_of_week = crontab_parser(7).parse('*')
-        [0, 1, 2, 3, 4, 5, 6]
-
-    It can also parse day_of_month and month_of_year expressions if
-    initialized with a minimum of 1.  Example usage::
-
-        >>> days_of_month = crontab_parser(31, 1).parse('*/3')
-        [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
-        >>> months_of_year = crontab_parser(12, 1).parse('*/2')
-        [1, 3, 5, 7, 9, 11]
-        >>> months_of_year = crontab_parser(12, 1).parse('2-12/2')
-        [2, 4, 6, 8, 10, 12]
-
-    The maximum possible expanded value returned is found by the formula::
-
-        max_ + min_ - 1
-
-    """
-    ParseException = ParseException
-
-    _range = r'(\w+?)-(\w+)'
-    _steps = r'/(\w+)?'
- _star = r'\*' - - def __init__(self, max_=60, min_=0): - self.max_ = max_ - self.min_ = min_ - self.pats = ( - (re.compile(self._range + self._steps), self._range_steps), - (re.compile(self._range), self._expand_range), - (re.compile(self._star + self._steps), self._star_steps), - (re.compile('^' + self._star + '$'), self._expand_star), - ) - - def parse(self, spec): - acc = set() - for part in spec.split(','): - if not part: - raise self.ParseException('empty part') - acc |= set(self._parse_part(part)) - return acc - - def _parse_part(self, part): - for regex, handler in self.pats: - m = regex.match(part) - if m: - return handler(m.groups()) - return self._expand_range((part, )) - - def _expand_range(self, toks): - fr = self._expand_number(toks[0]) - if len(toks) > 1: - to = self._expand_number(toks[1]) - if to < fr: # Wrap around max_ if necessary - return (list(range(fr, self.min_ + self.max_)) + - list(range(self.min_, to + 1))) - return list(range(fr, to + 1)) - return [fr] - - def _range_steps(self, toks): - if len(toks) != 3 or not toks[2]: - raise self.ParseException('empty filter') - return self._expand_range(toks[:2])[::int(toks[2])] - - def _star_steps(self, toks): - if not toks or not toks[0]: - raise self.ParseException('empty filter') - return self._expand_star()[::int(toks[0])] - - def _expand_star(self, *args): - return list(range(self.min_, self.max_ + self.min_)) - - def _expand_number(self, s): - if isinstance(s, string_t) and s[0] == '-': - raise self.ParseException('negative numbers not supported') - try: - i = int(s) - except ValueError: - try: - i = weekday(s) - except KeyError: - raise ValueError('Invalid weekday literal {0!r}.'.format(s)) - - max_val = self.min_ + self.max_ - 1 - if i > max_val: - raise ValueError( - 'Invalid end range: {0} > {1}.'.format(i, max_val)) - if i < self.min_: - raise ValueError( - 'Invalid beginning range: {0} < {1}.'.format(i, self.min_)) - - return i - - -class crontab(schedule): - """A crontab can be used as the `run_every` value of a - :class:`PeriodicTask` to add cron-like scheduling. - - Like a :manpage:`cron` job, you can specify units of time of when - you would like the task to execute. It is a reasonably complete - implementation of cron's features, so it should provide a fair - degree of scheduling needs. - - You can specify a minute, an hour, a day of the week, a day of the - month, and/or a month in the year in any of the following formats: - - .. attribute:: minute - - - A (list of) integers from 0-59 that represent the minutes of - an hour of when execution should occur; or - - A string representing a crontab pattern. This may get pretty - advanced, like `minute='*/15'` (for every quarter) or - `minute='1,13,30-45,50-59/2'`. - - .. attribute:: hour - - - A (list of) integers from 0-23 that represent the hours of - a day of when execution should occur; or - - A string representing a crontab pattern. This may get pretty - advanced, like `hour='*/3'` (for every three hours) or - `hour='0,8-17/2'` (at midnight, and every two hours during - office hours). - - .. attribute:: day_of_week - - - A (list of) integers from 0-6, where Sunday = 0 and Saturday = - 6, that represent the days of a week that execution should - occur. - - A string representing a crontab pattern. This may get pretty - advanced, like `day_of_week='mon-fri'` (for weekdays only). - (Beware that `day_of_week='*/2'` does not literally mean - 'every two days', but 'every day that is divisible by two'!) - - .. 
attribute:: day_of_month
-
-        - A (list of) integers from 1-31 that represent the days of the
-          month on which execution should occur.
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `day_of_month='2-30/3'` (for every third day of
-          the month, starting from the second) or `day_of_month='1-7,15-21'`
-          (for the first and third weeks of the month).
-
-    .. attribute:: month_of_year
-
-        - A (list of) integers from 1-12 that represent the months of
-          the year during which execution can occur.
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `month_of_year='*/3'` (for the first month
-          of every quarter) or `month_of_year='2-12/2'` (for every even
-          numbered month).
-
-    .. attribute:: nowfun
-
-        Function returning the current date and time
-        (:class:`~datetime.datetime`).
-
-    .. attribute:: app
-
-        The Celery app instance.
-
-    It is important to realize that any day on which execution should
-    occur must be represented by entries in all three of the day and
-    month attributes.  For example, if `day_of_week` is 0 and `day_of_month`
-    is every seventh day, only months that begin on Sunday and are also
-    in the `month_of_year` attribute will have execution events.  Or, if
-    `day_of_week` is 1 and `day_of_month` is '1-7,15-21', that means every
-    first and third Monday of every month present in `month_of_year`.
-
-    """
-
-    def __init__(self, minute='*', hour='*', day_of_week='*',
-                 day_of_month='*', month_of_year='*', nowfun=None, app=None):
-        self._orig_minute = cronfield(minute)
-        self._orig_hour = cronfield(hour)
-        self._orig_day_of_week = cronfield(day_of_week)
-        self._orig_day_of_month = cronfield(day_of_month)
-        self._orig_month_of_year = cronfield(month_of_year)
-        self.hour = self._expand_cronspec(hour, 24)
-        self.minute = self._expand_cronspec(minute, 60)
-        self.day_of_week = self._expand_cronspec(day_of_week, 7)
-        self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
-        self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
-        self.nowfun = nowfun
-        self._app = app
-
-    @staticmethod
-    def _expand_cronspec(cronspec, max_, min_=0):
-        """Takes the given cronspec argument in one of the forms::
-
-            int         (like 7)
-            str         (like '3-5,*/15', '*', or 'monday')
-            set         (like set([0,15,30,45]))
-            list        (like [8-17])
-
-        And converts it to an (expanded) set representing all time unit
-        values on which the crontab triggers.  Only in case of the base
-        type being 'str' does parsing occur.  (It is fast and
-        happens only once for each crontab instance, so there is no
-        significant performance overhead involved.)
-
-        For the other base types, merely Python type conversions happen.
-
-        The argument `max_` is needed to determine the expansion of '*'
-        and ranges.
-        The argument `min_` is needed to determine the expansion of '*'
-        and ranges for 1-based cronspecs, such as day of month or month
-        of year.  The default is sufficient for minute, hour, and day of
-        week.
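        For example, a rough sketch of expanding a string pattern (the
        repr of the returned set varies across Python versions)::

            >>> crontab._expand_cronspec('*/15', 60)
            set([0, 15, 30, 45])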
-
-        """
-        if isinstance(cronspec, numbers.Integral):
-            result = set([cronspec])
-        elif isinstance(cronspec, string_t):
-            result = crontab_parser(max_, min_).parse(cronspec)
-        elif isinstance(cronspec, set):
-            result = cronspec
-        elif is_iterable(cronspec):
-            result = set(cronspec)
-        else:
-            raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))
-
-        # ensure the result does not precede the min or exceed the max
-        for number in result:
-            if number >= max_ + min_ or number < min_:
-                raise ValueError(CRON_PATTERN_INVALID.format(
-                    min=min_, max=max_ - 1 + min_, value=number))
-        return result
-
-    def _delta_to_next(self, last_run_at, next_hour, next_minute):
-        """
-        Takes a datetime of the last run, plus the next minute and hour, and
-        returns a relativedelta for the next scheduled day and time.
-        Only called when a day_of_month and/or month_of_year cronspec
-        is specified, to further limit scheduled task execution.
-        """
-        from bisect import bisect, bisect_left
-
-        datedata = AttributeDict(year=last_run_at.year)
-        days_of_month = sorted(self.day_of_month)
-        months_of_year = sorted(self.month_of_year)
-
-        def day_out_of_range(year, month, day):
-            try:
-                datetime(year=year, month=month, day=day)
-            except ValueError:
-                return True
-            return False
-
-        def roll_over():
-            while 1:
-                flag = (datedata.dom == len(days_of_month) or
-                        day_out_of_range(datedata.year,
-                                         months_of_year[datedata.moy],
-                                         days_of_month[datedata.dom]) or
-                        (self.maybe_make_aware(datetime(datedata.year,
-                            months_of_year[datedata.moy],
-                            days_of_month[datedata.dom])) < last_run_at))
-
-                if flag:
-                    datedata.dom = 0
-                    datedata.moy += 1
-                    if datedata.moy == len(months_of_year):
-                        datedata.moy = 0
-                        datedata.year += 1
-                else:
-                    break
-
-        if last_run_at.month in self.month_of_year:
-            datedata.dom = bisect(days_of_month, last_run_at.day)
-            datedata.moy = bisect_left(months_of_year, last_run_at.month)
-        else:
-            datedata.dom = 0
-            datedata.moy = bisect(months_of_year, last_run_at.month)
-            if datedata.moy == len(months_of_year):
-                datedata.moy = 0
-        roll_over()
-
-        while 1:
-            th = datetime(year=datedata.year,
-                          month=months_of_year[datedata.moy],
-                          day=days_of_month[datedata.dom])
-            if th.isoweekday() % 7 in self.day_of_week:
-                break
-            datedata.dom += 1
-            roll_over()
-
-        return ffwd(year=datedata.year,
-                    month=months_of_year[datedata.moy],
-                    day=days_of_month[datedata.dom],
-                    hour=next_hour,
-                    minute=next_minute,
-                    second=0,
-                    microsecond=0)
-
-    def now(self):
-        return (self.nowfun or self.app.now)()
-
-    def __repr__(self):
-        return CRON_REPR.format(self)
-
-    def __reduce__(self):
-        return (self.__class__, (self._orig_minute,
-                                 self._orig_hour,
-                                 self._orig_day_of_week,
-                                 self._orig_day_of_month,
-                                 self._orig_month_of_year), None)
-
-    def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
-        tz = tz or self.tz
-        last_run_at = self.maybe_make_aware(last_run_at)
-        now = self.maybe_make_aware(self.now())
-        dow_num = last_run_at.isoweekday() % 7  # Sunday is day 0, not day 7
-
-        execute_this_date = (last_run_at.month in self.month_of_year and
-                             last_run_at.day in self.day_of_month and
-                             dow_num in self.day_of_week)
-
-        execute_this_hour = (execute_this_date and
-                             last_run_at.day == now.day and
-                             last_run_at.month == now.month and
-                             last_run_at.year == now.year and
-                             last_run_at.hour in self.hour and
-                             last_run_at.minute < max(self.minute))
-
-        if execute_this_hour:
-            next_minute = min(minute for minute in self.minute
-                              if minute > last_run_at.minute)
-            delta = ffwd(minute=next_minute, second=0, microsecond=0)
-        else:
-            next_minute =
min(self.minute) - execute_today = (execute_this_date and - last_run_at.hour < max(self.hour)) - - if execute_today: - next_hour = min(hour for hour in self.hour - if hour > last_run_at.hour) - delta = ffwd(hour=next_hour, minute=next_minute, - second=0, microsecond=0) - else: - next_hour = min(self.hour) - all_dom_moy = (self._orig_day_of_month == '*' and - self._orig_month_of_year == '*') - if all_dom_moy: - next_day = min([day for day in self.day_of_week - if day > dow_num] or self.day_of_week) - add_week = next_day == dow_num - - delta = ffwd(weeks=add_week and 1 or 0, - weekday=(next_day - 1) % 7, - hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) - else: - delta = self._delta_to_next(last_run_at, - next_hour, next_minute) - return self.to_local(last_run_at), delta, self.to_local(now) - - def remaining_estimate(self, last_run_at, ffwd=ffwd): - """Returns when the periodic task should run next as a timedelta.""" - return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) - - def is_due(self, last_run_at): - """Returns tuple of two items `(is_due, next_time_to_run)`, - where next time to run is in seconds. - - See :meth:`celery.schedules.schedule.is_due` for more information. - - """ - rem_delta = self.remaining_estimate(last_run_at) - rem = timedelta_seconds(rem_delta) - due = rem == 0 - if due: - rem_delta = self.remaining_estimate(self.now()) - rem = timedelta_seconds(rem_delta) - return schedstate(due, rem) - - def __eq__(self, other): - if isinstance(other, crontab): - return (other.month_of_year == self.month_of_year and - other.day_of_month == self.day_of_month and - other.day_of_week == self.day_of_week and - other.hour == self.hour and - other.minute == self.minute) - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - -def maybe_schedule(s, relative=False, app=None): - if s is not None: - if isinstance(s, numbers.Integral): - s = timedelta(seconds=s) - if isinstance(s, timedelta): - return schedule(s, relative, app=app) - else: - s.app = app - return s diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py deleted file mode 100644 index 352d400..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security - ~~~~~~~~~~~~~~~ - - Module implementing the signing message serializer. - -""" -from __future__ import absolute_import - -from kombu.serialization import ( - registry, disable_insecure_serializers as _disable_insecure_serializers, -) - -from celery.exceptions import ImproperlyConfigured - -from .serialization import register_auth - -SSL_NOT_INSTALLED = """\ -You need to install the pyOpenSSL library to use the auth serializer. -Please install by: - - $ pip install pyOpenSSL -""" - -SETTING_MISSING = """\ -Sorry, but you have to configure the - * CELERY_SECURITY_KEY - * CELERY_SECURITY_CERTIFICATE, and the - * CELERY_SECURITY_CERT_STORE -configuration settings to use the auth serializer. - -Please see the configuration reference for more information. 
-"""
-
-__all__ = ['setup_security']
-
-
-def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
                   digest='sha1', serializer='json', app=None):
-    """See :meth:`@Celery.setup_security`."""
-    if app is None:
-        from celery import current_app
-        app = current_app._get_current_object()
-
-    _disable_insecure_serializers(allowed_serializers)
-
-    conf = app.conf
-    if conf.CELERY_TASK_SERIALIZER != 'auth':
-        return
-
-    try:
-        from OpenSSL import crypto  # noqa
-    except ImportError:
-        raise ImproperlyConfigured(SSL_NOT_INSTALLED)
-
-    key = key or conf.CELERY_SECURITY_KEY
-    cert = cert or conf.CELERY_SECURITY_CERTIFICATE
-    store = store or conf.CELERY_SECURITY_CERT_STORE
-
-    if not (key and cert and store):
-        raise ImproperlyConfigured(SETTING_MISSING)
-
-    with open(key) as kf:
-        with open(cert) as cf:
-            register_auth(kf.read(), cf.read(), store, digest, serializer)
-    registry._set_default_serializer('auth')
-
-
-def disable_untrusted_serializers(whitelist=None):
-    _disable_insecure_serializers(allowed=whitelist)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py b/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py
deleted file mode 100644
index c1c520c..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.security.certificate
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    X.509 certificates.
-
-"""
-from __future__ import absolute_import
-
-import glob
-import os
-
-from kombu.utils.encoding import bytes_to_str
-
-from celery.exceptions import SecurityError
-from celery.five import values
-
-from .utils import crypto, reraise_errors
-
-__all__ = ['Certificate', 'CertStore', 'FSCertStore']
-
-
-class Certificate(object):
-    """X.509 certificate."""
-
-    def __init__(self, cert):
-        assert crypto is not None
-        with reraise_errors('Invalid certificate: {0!r}'):
-            self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
-
-    def has_expired(self):
-        """Check if the certificate has expired."""
-        return self._cert.has_expired()
-
-    def get_serial_number(self):
-        """Return the serial number in the certificate."""
-        return bytes_to_str(self._cert.get_serial_number())
-
-    def get_issuer(self):
-        """Return the issuer (CA) as a string."""
-        return ' '.join(bytes_to_str(x[1]) for x in
-                        self._cert.get_issuer().get_components())
-
-    def get_id(self):
-        """Serial number/issuer pair uniquely identifies a certificate."""
-        return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())
-
-    def verify(self, data, signature, digest):
-        """Verifies the signature for a string containing data."""
-        with reraise_errors('Bad signature: {0!r}'):
-            crypto.verify(self._cert, signature, data, digest)
-
-
-class CertStore(object):
-    """Base class for certificate stores."""
-
-    def __init__(self):
-        self._certs = {}
-
-    def itercerts(self):
-        """An iterator over the certificates."""
-        for c in values(self._certs):
-            yield c
-
-    def __getitem__(self, id):
-        """Get certificate by id."""
-        try:
-            return self._certs[bytes_to_str(id)]
-        except KeyError:
-            raise SecurityError('Unknown certificate: {0!r}'.format(id))
-
-    def add_cert(self, cert):
-        cert_id = bytes_to_str(cert.get_id())
-        if cert_id in self._certs:
-            raise SecurityError(
-                'Duplicate certificate: {0!r}'.format(cert_id))
-        self._certs[cert_id] = cert
-
-
-class FSCertStore(CertStore):
-    """File system certificate store."""
-
-    def __init__(self, path):
-        CertStore.__init__(self)
-        if os.path.isdir(path):
-            path =
os.path.join(path, '*') - for p in glob.glob(path): - with open(p) as f: - cert = Certificate(f.read()) - if cert.has_expired(): - raise SecurityError( - 'Expired certificate: {0!r}'.format(cert.get_id())) - self.add_cert(cert) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/key.py b/thesisenv/lib/python3.6/site-packages/celery/security/key.py deleted file mode 100644 index a5c2620..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/key.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.key - ~~~~~~~~~~~~~~~~~~~ - - Private key for the security serializer. - -""" -from __future__ import absolute_import - -from kombu.utils.encoding import ensure_bytes - -from .utils import crypto, reraise_errors - -__all__ = ['PrivateKey'] - - -class PrivateKey(object): - - def __init__(self, key): - with reraise_errors('Invalid private key: {0!r}'): - self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) - - def sign(self, data, digest): - """sign string containing data.""" - with reraise_errors('Unable to sign data: {0!r}'): - return crypto.sign(self._key, ensure_bytes(data), digest) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py deleted file mode 100644 index 7548358..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.serialization - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Secure serializer. - -""" -from __future__ import absolute_import - -import base64 - -from kombu.serialization import registry, dumps, loads -from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes - -from .certificate import Certificate, FSCertStore -from .key import PrivateKey -from .utils import reraise_errors - -__all__ = ['SecureSerializer', 'register_auth'] - - -def b64encode(s): - return bytes_to_str(base64.b64encode(str_to_bytes(s))) - - -def b64decode(s): - return base64.b64decode(str_to_bytes(s)) - - -class SecureSerializer(object): - - def __init__(self, key=None, cert=None, cert_store=None, - digest='sha1', serializer='json'): - self._key = key - self._cert = cert - self._cert_store = cert_store - self._digest = digest - self._serializer = serializer - - def serialize(self, data): - """serialize data structure into string""" - assert self._key is not None - assert self._cert is not None - with reraise_errors('Unable to serialize: {0!r}', (Exception, )): - content_type, content_encoding, body = dumps( - bytes_to_str(data), serializer=self._serializer) - # What we sign is the serialized body, not the body itself. - # this way the receiver doesn't have to decode the contents - # to verify the signature (and thus avoiding potential flaws - # in the decoding step). 
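            # (A sketch of the envelope _pack() builds below:
            #  b64encode(signer + sep + signature + sep + content_type +
            #            sep + content_encoding + sep + body),
            #  where sep is the two-byte separator '\x00\x01'.)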
- body = ensure_bytes(body) - return self._pack(body, content_type, content_encoding, - signature=self._key.sign(body, self._digest), - signer=self._cert.get_id()) - - def deserialize(self, data): - """deserialize data structure from string""" - assert self._cert_store is not None - with reraise_errors('Unable to deserialize: {0!r}', (Exception, )): - payload = self._unpack(data) - signature, signer, body = (payload['signature'], - payload['signer'], - payload['body']) - self._cert_store[signer].verify(body, signature, self._digest) - return loads(bytes_to_str(body), payload['content_type'], - payload['content_encoding'], force=True) - - def _pack(self, body, content_type, content_encoding, signer, signature, - sep=str_to_bytes('\x00\x01')): - fields = sep.join( - ensure_bytes(s) for s in [signer, signature, content_type, - content_encoding, body] - ) - return b64encode(fields) - - def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): - raw_payload = b64decode(ensure_bytes(payload)) - first_sep = raw_payload.find(sep) - - signer = raw_payload[:first_sep] - signer_cert = self._cert_store[signer] - - sig_len = signer_cert._cert.get_pubkey().bits() >> 3 - signature = raw_payload[ - first_sep + len(sep):first_sep + len(sep) + sig_len - ] - end_of_sig = first_sep + len(sep) + sig_len + len(sep) - - v = raw_payload[end_of_sig:].split(sep) - - return { - 'signer': signer, - 'signature': signature, - 'content_type': bytes_to_str(v[0]), - 'content_encoding': bytes_to_str(v[1]), - 'body': bytes_to_str(v[2]), - } - - -def register_auth(key=None, cert=None, store=None, digest='sha1', - serializer='json'): - """register security serializer""" - s = SecureSerializer(key and PrivateKey(key), - cert and Certificate(cert), - store and FSCertStore(store), - digest=digest, serializer=serializer) - registry.register('auth', s.serialize, s.deserialize, - content_type='application/data', - content_encoding='utf-8') diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/utils.py b/thesisenv/lib/python3.6/site-packages/celery/security/utils.py deleted file mode 100644 index d184d0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/utils.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.utils - ~~~~~~~~~~~~~~~~~~~~~ - - Utilities used by the message signing serializer. - -""" -from __future__ import absolute_import - -import sys - -from contextlib import contextmanager - -from celery.exceptions import SecurityError -from celery.five import reraise - -try: - from OpenSSL import crypto -except ImportError: # pragma: no cover - crypto = None # noqa - -__all__ = ['reraise_errors'] - - -@contextmanager -def reraise_errors(msg='{0!r}', errors=None): - assert crypto is not None - errors = (crypto.Error, ) if errors is None else errors - try: - yield - except errors as exc: - reraise(SecurityError, - SecurityError(msg.format(exc)), - sys.exc_info()[2]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/signals.py b/thesisenv/lib/python3.6/site-packages/celery/signals.py deleted file mode 100644 index 2091830..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/signals.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.signals - ~~~~~~~~~~~~~~ - - This module defines the signals (Observer pattern) sent by - both workers and clients. - - Functions can be connected to these signals, and connected - functions are called whenever a signal is called. - - See :ref:`signals` for more information. 
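    A minimal connection sketch (the handler name is illustrative)::

        from celery.signals import task_success

        @task_success.connect
        def on_task_success(sender=None, result=None, **kwargs):
            print('task {0!r} returned {1!r}'.format(sender, result))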
- -""" -from __future__ import absolute_import -from .utils.dispatch import Signal - -__all__ = ['before_task_publish', 'after_task_publish', - 'task_prerun', 'task_postrun', 'task_success', - 'task_retry', 'task_failure', 'task_revoked', 'celeryd_init', - 'celeryd_after_setup', 'worker_init', 'worker_process_init', - 'worker_ready', 'worker_shutdown', 'setup_logging', - 'after_setup_logger', 'after_setup_task_logger', - 'beat_init', 'beat_embedded_init', 'eventlet_pool_started', - 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', - 'eventlet_pool_apply'] - -before_task_publish = Signal(providing_args=[ - 'body', 'exchange', 'routing_key', 'headers', 'properties', - 'declare', 'retry_policy', -]) -after_task_publish = Signal(providing_args=[ - 'body', 'exchange', 'routing_key', -]) -#: Deprecated, use after_task_publish instead. -task_sent = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', -]) -task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs']) -task_postrun = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'retval', -]) -task_success = Signal(providing_args=['result']) -task_retry = Signal(providing_args=[ - 'request', 'reason', 'einfo', -]) -task_failure = Signal(providing_args=[ - 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', -]) -task_revoked = Signal(providing_args=[ - 'request', 'terminated', 'signum', 'expired', -]) -celeryd_init = Signal(providing_args=['instance', 'conf', 'options']) -celeryd_after_setup = Signal(providing_args=['instance', 'conf']) -import_modules = Signal(providing_args=[]) -worker_init = Signal(providing_args=[]) -worker_process_init = Signal(providing_args=[]) -worker_process_shutdown = Signal(providing_args=[]) -worker_ready = Signal(providing_args=[]) -worker_shutdown = Signal(providing_args=[]) -setup_logging = Signal(providing_args=[ - 'loglevel', 'logfile', 'format', 'colorize', -]) -after_setup_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize', -]) -after_setup_task_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize', -]) -beat_init = Signal(providing_args=[]) -beat_embedded_init = Signal(providing_args=[]) -eventlet_pool_started = Signal(providing_args=[]) -eventlet_pool_preshutdown = Signal(providing_args=[]) -eventlet_pool_postshutdown = Signal(providing_args=[]) -eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs']) -user_preload_options = Signal(providing_args=['app', 'options']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/states.py b/thesisenv/lib/python3.6/site-packages/celery/states.py deleted file mode 100644 index 665a57b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/states.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.states -============= - -Built-in task states. - -.. _states: - -States ------- - -See :ref:`task-states`. - -.. _statesets: - -Sets ----- - -.. state:: READY_STATES - -READY_STATES -~~~~~~~~~~~~ - -Set of states meaning the task result is ready (has been executed). - -.. state:: UNREADY_STATES - -UNREADY_STATES -~~~~~~~~~~~~~~ - -Set of states meaning the task result is not ready (has not been executed). - -.. state:: EXCEPTION_STATES - -EXCEPTION_STATES -~~~~~~~~~~~~~~~~ - -Set of states meaning the task returned an exception. - -.. state:: PROPAGATE_STATES - -PROPAGATE_STATES -~~~~~~~~~~~~~~~~ - -Set of exception states that should propagate exceptions to the user. - -.. 
state:: ALL_STATES - -ALL_STATES -~~~~~~~~~~ - -Set of all possible states. - - -Misc. ------ - -""" -from __future__ import absolute_import - -__all__ = ['PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', - 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', - 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state'] - -#: State precedence. -#: None represents the precedence of an unknown state. -#: Lower index means higher precedence. -PRECEDENCE = ['SUCCESS', - 'FAILURE', - None, - 'REVOKED', - 'STARTED', - 'RECEIVED', - 'RETRY', - 'PENDING'] - -#: Hash lookup of PRECEDENCE to index -PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) -NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] - - -def precedence(state): - """Get the precedence index for state. - - Lower index means higher precedence. - - """ - try: - return PRECEDENCE_LOOKUP[state] - except KeyError: - return NONE_PRECEDENCE - - -class state(str): - """State is a subclass of :class:`str`, implementing comparison - methods adhering to state precedence rules:: - - >>> from celery.states import state, PENDING, SUCCESS - - >>> state(PENDING) < state(SUCCESS) - True - - Any custom state is considered to be lower than :state:`FAILURE` and - :state:`SUCCESS`, but higher than any of the other built-in states:: - - >>> state('PROGRESS') > state(STARTED) - True - - >>> state('PROGRESS') > state('SUCCESS') - False - - """ - - def compare(self, other, fun): - return fun(precedence(self), precedence(other)) - - def __gt__(self, other): - return precedence(self) < precedence(other) - - def __ge__(self, other): - return precedence(self) <= precedence(other) - - def __lt__(self, other): - return precedence(self) > precedence(other) - - def __le__(self, other): - return precedence(self) >= precedence(other) - -#: Task state is unknown (assumed pending since you know the id). -PENDING = 'PENDING' -#: Task was received by a worker. -RECEIVED = 'RECEIVED' -#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`). -STARTED = 'STARTED' -#: Task succeeded -SUCCESS = 'SUCCESS' -#: Task failed -FAILURE = 'FAILURE' -#: Task was revoked. -REVOKED = 'REVOKED' -#: Task is waiting for retry. -RETRY = 'RETRY' -IGNORED = 'IGNORED' -REJECTED = 'REJECTED' - -READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED]) -UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY]) -EXCEPTION_STATES = frozenset([RETRY, FAILURE, REVOKED]) -PROPAGATE_STATES = frozenset([FAILURE, REVOKED]) - -ALL_STATES = frozenset([PENDING, RECEIVED, STARTED, - SUCCESS, FAILURE, RETRY, REVOKED]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py deleted file mode 100644 index 4ab1a2f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task - ~~~~~~~~~~~ - - This is the old task module, it should not be used anymore, - import from the main 'celery' module instead. - If you're looking for the decorator implementation then that's in - ``celery.app.base.Celery.task``. 
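    A minimal sketch of the modern equivalent (the app name is
    illustrative)::

        from celery import Celery

        app = Celery('proj')  # broker/backend configuration omitted

        @app.task
        def add(x, y):
            return x + y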
- -""" -from __future__ import absolute_import - -from celery._state import current_app, current_task as current -from celery.five import LazyModule, recreate_module -from celery.local import Proxy - -__all__ = [ - 'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task', - 'group', 'chord', 'subtask', 'TaskSet', -] - - -STATICA_HACK = True -globals()['kcah_acitats'[::-1].upper()] = False -if STATICA_HACK: # pragma: no cover - # This is never executed, but tricks static analyzers (PyDev, PyCharm, - # pylint, etc.) into knowing the types of these symbols, and what - # they contain. - from celery.canvas import group, chord, subtask - from .base import BaseTask, Task, PeriodicTask, task, periodic_task - from .sets import TaskSet - - -class module(LazyModule): - - def __call__(self, *args, **kwargs): - return self.task(*args, **kwargs) - - -old_module, new_module = recreate_module( # pragma: no cover - __name__, - by_module={ - 'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask', - 'task', 'periodic_task'], - 'celery.canvas': ['group', 'chord', 'subtask'], - 'celery.task.sets': ['TaskSet'], - }, - base=module, - __package__='celery.task', - __file__=__file__, - __path__=__path__, - __doc__=__doc__, - current=current, - discard_all=Proxy(lambda: current_app.control.purge), - backend_cleanup=Proxy( - lambda: current_app.tasks['celery.backend_cleanup'] - ), -) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/base.py b/thesisenv/lib/python3.6/site-packages/celery/task/base.py deleted file mode 100644 index aeb9f82..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/base.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.base - ~~~~~~~~~~~~~~~~ - - The task implementation has been moved to :mod:`celery.app.task`. - - This contains the backward compatible Task class used in the old API, - and shouldn't be used in new applications. - -""" -from __future__ import absolute_import - -from kombu import Exchange - -from celery import current_app -from celery.app.task import Context, TaskType, Task as BaseTask # noqa -from celery.five import class_property, reclassmethod -from celery.schedules import maybe_schedule -from celery.utils.log import get_task_logger - -__all__ = ['Task', 'PeriodicTask', 'task'] - -#: list of methods that must be classmethods in the old API. -_COMPAT_CLASSMETHODS = ( - 'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request', - 'AsyncResult', 'subtask', '_get_request', '_get_exec_options', -) - - -class Task(BaseTask): - """Deprecated Task base class. - - Modern applications should use :class:`celery.Task` instead. - - """ - abstract = True - __bound__ = False - __v2_compat__ = True - - # - Deprecated compat. attributes -: - - queue = None - routing_key = None - exchange = None - exchange_type = None - delivery_mode = None - mandatory = False # XXX deprecated - immediate = False # XXX deprecated - priority = None - type = 'regular' - disable_error_emails = False - accept_magic_kwargs = False - - from_config = BaseTask.from_config + ( - ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'), - ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'), - ) - - # In old Celery the @task decorator didn't exist, so one would create - # classes instead and use them directly (e.g. MyTask.apply_async()). - # the use of classmethods was a hack so that it was not necessary - # to instantiate the class before using it, but it has only - # given us pain (like all magic). 
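    # A sketch of the old-style usage these classmethods kept working
    # (the class and arguments are illustrative):
    #
    #     class AddTask(Task):
    #         def run(self, x, y):
    #             return x + y
    #
    #     AddTask.delay(2, 2)  # called on the class itself, no instance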
- for name in _COMPAT_CLASSMETHODS: - locals()[name] = reclassmethod(getattr(BaseTask, name)) - - @class_property - def request(cls): - return cls._get_request() - - @class_property - def backend(cls): - if cls._backend is None: - return cls.app.backend - return cls._backend - - @backend.setter - def backend(cls, value): # noqa - cls._backend = value - - @classmethod - def get_logger(self, **kwargs): - return get_task_logger(self.name) - - @classmethod - def establish_connection(self): - """Deprecated method used to get a broker connection. - - Should be replaced with :meth:`@Celery.connection` - instead, or by acquiring connections from the connection pool: - - .. code-block:: python - - # using the connection pool - with celery.pool.acquire(block=True) as conn: - ... - - # establish fresh connection - with celery.connection() as conn: - ... - """ - return self._get_app().connection() - - def get_publisher(self, connection=None, exchange=None, - exchange_type=None, **options): - """Deprecated method to get the task publisher (now called producer). - - Should be replaced with :class:`@amqp.TaskProducer`: - - .. code-block:: python - - with celery.connection() as conn: - with celery.amqp.TaskProducer(conn) as prod: - my_task.apply_async(producer=prod) - - """ - exchange = self.exchange if exchange is None else exchange - if exchange_type is None: - exchange_type = self.exchange_type - connection = connection or self.establish_connection() - return self._get_app().amqp.TaskProducer( - connection, - exchange=exchange and Exchange(exchange, exchange_type), - routing_key=self.routing_key, **options - ) - - @classmethod - def get_consumer(self, connection=None, queues=None, **kwargs): - """Deprecated method used to get consumer for the queue - this task is sent to. - - Should be replaced with :class:`@amqp.TaskConsumer` instead: - - """ - Q = self._get_app().amqp - connection = connection or self.establish_connection() - if queues is None: - queues = Q.queues[self.queue] if self.queue else Q.default_queue - return Q.TaskConsumer(connection, queues, **kwargs) - - -class PeriodicTask(Task): - """A periodic task is a task that adds itself to the - :setting:`CELERYBEAT_SCHEDULE` setting.""" - abstract = True - ignore_result = True - relative = False - options = None - compat = True - - def __init__(self): - if not hasattr(self, 'run_every'): - raise NotImplementedError( - 'Periodic tasks must have a run_every attribute') - self.run_every = maybe_schedule(self.run_every, self.relative) - super(PeriodicTask, self).__init__() - - @classmethod - def on_bound(cls, app): - app.conf.CELERYBEAT_SCHEDULE[cls.name] = { - 'task': cls.name, - 'schedule': cls.run_every, - 'args': (), - 'kwargs': {}, - 'options': cls.options or {}, - 'relative': cls.relative, - } - - -def task(*args, **kwargs): - """Deprecated decorator, please use :func:`celery.task`.""" - return current_app.task(*args, **dict({'accept_magic_kwargs': False, - 'base': Task}, **kwargs)) - - -def periodic_task(*args, **options): - """Deprecated decorator, please use :setting:`CELERYBEAT_SCHEDULE`.""" - return task(**dict({'base': PeriodicTask}, **options)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/http.py b/thesisenv/lib/python3.6/site-packages/celery/task/http.py deleted file mode 100644 index e170ec3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/http.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.http - ~~~~~~~~~~~~~~~~ - - Webhook task implementation. 
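    A minimal usage sketch (the URL is illustrative)::

        from celery.task.http import URL

        res = URL('http://example.com/multiply').get_async(x=10, y=10)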
- -""" -from __future__ import absolute_import - -import anyjson -import sys - -try: - from urllib.parse import parse_qsl, urlencode, urlparse # Py3 -except ImportError: # pragma: no cover - from urllib import urlencode # noqa - from urlparse import urlparse, parse_qsl # noqa - -from celery import shared_task, __version__ as celery_version -from celery.five import items, reraise -from celery.utils.log import get_task_logger - -__all__ = ['InvalidResponseError', 'RemoteExecuteError', 'UnknownStatusError', - 'HttpDispatch', 'dispatch', 'URL'] - -GET_METHODS = frozenset(['GET', 'HEAD']) -logger = get_task_logger(__name__) - - -if sys.version_info[0] == 3: # pragma: no cover - - from urllib.request import Request, urlopen - - def utf8dict(tup): - if not isinstance(tup, dict): - return dict(tup) - return tup - -else: - - from urllib2 import Request, urlopen # noqa - - def utf8dict(tup): # noqa - """With a dict's items() tuple return a new dict with any utf-8 - keys/values encoded.""" - return dict( - (k.encode('utf-8'), - v.encode('utf-8') if isinstance(v, unicode) else v) # noqa - for k, v in tup) - - -class InvalidResponseError(Exception): - """The remote server gave an invalid response.""" - - -class RemoteExecuteError(Exception): - """The remote task gave a custom error.""" - - -class UnknownStatusError(InvalidResponseError): - """The remote server gave an unknown status.""" - - -def extract_response(raw_response, loads=anyjson.loads): - """Extract the response text from a raw JSON response.""" - if not raw_response: - raise InvalidResponseError('Empty response') - try: - payload = loads(raw_response) - except ValueError as exc: - reraise(InvalidResponseError, InvalidResponseError( - str(exc)), sys.exc_info()[2]) - - status = payload['status'] - if status == 'success': - return payload['retval'] - elif status == 'failure': - raise RemoteExecuteError(payload.get('reason')) - else: - raise UnknownStatusError(str(status)) - - -class MutableURL(object): - """Object wrapping a Uniform Resource Locator. - - Supports editing the query parameter list. - You can convert the object back to a string, the query will be - properly urlencoded. - - Examples - - >>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo') - >>> url.query - {'x': '3', 'y': '4'} - >>> str(url) - 'http://www.google.com:6580/foo/bar?y=4&x=3#foo' - >>> url.query['x'] = 10 - >>> url.query.update({'George': 'Costanza'}) - >>> str(url) - 'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo' - - """ - def __init__(self, url): - self.parts = urlparse(url) - self.query = dict(parse_qsl(self.parts[4])) - - def __str__(self): - scheme, netloc, path, params, query, fragment = self.parts - query = urlencode(utf8dict(items(self.query))) - components = [scheme + '://', netloc, path or '/', - ';{0}'.format(params) if params else '', - '?{0}'.format(query) if query else '', - '#{0}'.format(fragment) if fragment else ''] - return ''.join(c for c in components if c) - - def __repr__(self): - return '<{0}: {1}>'.format(type(self).__name__, self) - - -class HttpDispatch(object): - """Make task HTTP request and collect the task result. - - :param url: The URL to request. - :param method: HTTP method used. Currently supported methods are `GET` - and `POST`. - :param task_kwargs: Task keyword arguments. - :param logger: Logger used for user/system feedback. 
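        Example (URL and arguments are illustrative)::

            res = HttpDispatch(url='http://example.com/mul', method='GET',
                               task_kwargs={'x': 10, 'y': 10}).dispatch()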
- - """ - user_agent = 'celery/{version}'.format(version=celery_version) - timeout = 5 - - def __init__(self, url, method, task_kwargs, **kwargs): - self.url = url - self.method = method - self.task_kwargs = task_kwargs - self.logger = kwargs.get('logger') or logger - - def make_request(self, url, method, params): - """Perform HTTP request and return the response.""" - request = Request(url, params) - for key, val in items(self.http_headers): - request.add_header(key, val) - response = urlopen(request) # user catches errors. - return response.read() - - def dispatch(self): - """Dispatch callback and return result.""" - url = MutableURL(self.url) - params = None - if self.method in GET_METHODS: - url.query.update(self.task_kwargs) - else: - params = urlencode(utf8dict(items(self.task_kwargs))) - raw_response = self.make_request(str(url), self.method, params) - return extract_response(raw_response) - - @property - def http_headers(self): - headers = {'User-Agent': self.user_agent} - return headers - - -@shared_task(name='celery.http_dispatch', bind=True, - url=None, method=None, accept_magic_kwargs=False) -def dispatch(self, url=None, method='GET', **kwargs): - """Task dispatching to an URL. - - :keyword url: The URL location of the HTTP callback task. - :keyword method: Method to use when dispatching the callback. Usually - `GET` or `POST`. - :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback. - - .. attribute:: url - - If this is set, this is used as the default URL for requests. - Default is to require the user of the task to supply the url as an - argument, as this attribute is intended for subclasses. - - .. attribute:: method - - If this is set, this is the default method used for requests. - Default is to require the user of the task to supply the method as an - argument, as this attribute is intended for subclasses. - - """ - return HttpDispatch( - url or self.url, method or self.method, kwargs, - ).dispatch() - - -class URL(MutableURL): - """HTTP Callback URL - - Supports requesting an URL asynchronously. - - :param url: URL to request. - :keyword dispatcher: Class used to dispatch the request. - By default this is :func:`dispatch`. - - """ - dispatcher = None - - def __init__(self, url, dispatcher=None, app=None): - super(URL, self).__init__(url) - self.app = app - self.dispatcher = dispatcher or self.dispatcher - if self.dispatcher is None: - # Get default dispatcher - self.dispatcher = ( - self.app.tasks['celery.http_dispatch'] if self.app - else dispatch - ) - - def get_async(self, **kwargs): - return self.dispatcher.delay(str(self), 'GET', **kwargs) - - def post_async(self, **kwargs): - return self.dispatcher.delay(str(self), 'POST', **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/sets.py b/thesisenv/lib/python3.6/site-packages/celery/task/sets.py deleted file mode 100644 index e277b79..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/sets.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.sets - ~~~~~~~~~~~~~~~~ - - Old ``group`` implementation, this module should - not be used anymore use :func:`celery.group` instead. 
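    A sketch of the :class:`~celery.group` equivalent (reusing the
    refresh_feed example from the TaskSet docstring below)::

        from celery import group

        result = group(refresh_feed.s(url) for url in urls)()
        values = result.join()  # blocking; can be expensive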
- -""" -from __future__ import absolute_import - -from celery._state import get_current_worker_task -from celery.app import app_or_default -from celery.canvas import maybe_signature # noqa -from celery.utils import uuid, warn_deprecated - -from celery.canvas import subtask # noqa - -warn_deprecated( - 'celery.task.sets and TaskSet', removal='4.0', - alternative="""\ -Please use "group" instead (see the Canvas section in the userguide)\ -""") - - -class TaskSet(list): - """A task containing several subtasks, making it possible - to track how many, or when all of the tasks have been completed. - - :param tasks: A list of :class:`subtask` instances. - - Example:: - - >>> from myproj.tasks import refresh_feed - - >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss') - >>> s = TaskSet(refresh_feed.s(url) for url in urls) - >>> taskset_result = s.apply_async() - >>> list_of_return_values = taskset_result.join() # *expensive* - - """ - app = None - - def __init__(self, tasks=None, app=None, Publisher=None): - self.app = app_or_default(app or self.app) - super(TaskSet, self).__init__( - maybe_signature(t, app=self.app) for t in tasks or [] - ) - self.Publisher = Publisher or self.app.amqp.TaskProducer - self.total = len(self) # XXX compat - - def apply_async(self, connection=None, publisher=None, taskset_id=None): - """Apply TaskSet.""" - app = self.app - - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(taskset_id=taskset_id) - - with app.connection_or_acquire(connection) as conn: - setid = taskset_id or uuid() - pub = publisher or self.Publisher(conn) - results = self._async_results(setid, pub) - - result = app.TaskSetResult(setid, results) - parent = get_current_worker_task() - if parent: - parent.add_trail(result) - return result - - def _async_results(self, taskset_id, publisher): - return [task.apply_async(taskset_id=taskset_id, publisher=publisher) - for task in self] - - def apply(self, taskset_id=None): - """Applies the TaskSet locally by blocking until all tasks return.""" - setid = taskset_id or uuid() - return self.app.TaskSetResult(setid, self._sync_results(setid)) - - def _sync_results(self, taskset_id): - return [task.apply(taskset_id=taskset_id) for task in self] - - @property - def tasks(self): - return self - - @tasks.setter # noqa - def tasks(self, tasks): - self[:] = tasks diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/trace.py b/thesisenv/lib/python3.6/site-packages/celery/task/trace.py deleted file mode 100644 index 43f19cb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/trace.py +++ /dev/null @@ -1,12 +0,0 @@ -"""This module has moved to celery.app.trace.""" -from __future__ import absolute_import - -import sys - -from celery.app import trace -from celery.utils import warn_deprecated - -warn_deprecated('celery.task.trace', removal='3.2', - alternative='Please use celery.app.trace instead.') - -sys.modules[__name__] = trace diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py deleted file mode 100644 index 9667872..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import sys -import warnings - -from importlib import import_module - -try: - WindowsError = WindowsError # noqa -except NameError: - - class WindowsError(Exception): - pass - - -def setup(): - os.environ.update( - # warn if config module not found - 
C_WNOCONF='yes', - KOMBU_DISABLE_LIMIT_PROTECTION='yes', - ) - - if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv: - from warnings import catch_warnings - with catch_warnings(record=True): - import_all_modules() - warnings.resetwarnings() - from celery.tests.case import Trap - from celery._state import set_default_app - set_default_app(Trap()) - - -def teardown(): - # Don't want SUBDEBUG log messages at finalization. - try: - from multiprocessing.util import get_logger - except ImportError: - pass - else: - get_logger().setLevel(logging.WARNING) - - # Make sure test database is removed. - import os - if os.path.exists('test.db'): - try: - os.remove('test.db') - except WindowsError: - pass - - # Make sure there are no remaining threads at shutdown. - import threading - remaining_threads = [thread for thread in threading.enumerate() - if thread.getName() != 'MainThread'] - if remaining_threads: - sys.stderr.write( - '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % ( - remaining_threads)) - - -def find_distribution_modules(name=__name__, file=__file__): - current_dist_depth = len(name.split('.')) - 1 - current_dist = os.path.join(os.path.dirname(file), - *([os.pardir] * current_dist_depth)) - abs = os.path.abspath(current_dist) - dist_name = os.path.basename(abs) - - for dirpath, dirnames, filenames in os.walk(abs): - package = (dist_name + dirpath[len(abs):]).replace('/', '.') - if '__init__.py' in filenames: - yield package - for filename in filenames: - if filename.endswith('.py') and filename != '__init__.py': - yield '.'.join([package, filename])[:-3] - - -def import_all_modules(name=__name__, file=__file__, - skip=('celery.decorators', - 'celery.contrib.batches', - 'celery.task')): - for module in find_distribution_modules(name, file): - if not module.startswith(skip): - try: - import_module(module) - except ImportError: - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py deleted file mode 100644 index efb398a..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py +++ /dev/null @@ -1,228 +0,0 @@ -from __future__ import absolute_import - -import datetime - -import pytz - -from kombu import Exchange, Queue - -from celery.app.amqp import Queues, TaskPublisher -from celery.five import keys -from celery.tests.case import AppCase, Mock - - -class test_TaskProducer(AppCase): - - def test__exit__(self): - publisher = self.app.amqp.TaskProducer(self.app.connection()) - publisher.release = Mock() - with publisher: - pass - publisher.release.assert_called_with() - - def test_declare(self): - publisher = self.app.amqp.TaskProducer(self.app.connection()) - publisher.exchange.name = 'foo' - publisher.declare() - publisher.exchange.name = None - publisher.declare() - - def test_retry_policy(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish_task('tasks.add', (2, 2), {}, - retry_policy={'frobulate': 32.4}) - - def test_publish_no_retry(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123) - self.assertFalse(prod.connection.ensure.call_count) - - def 
test_publish_custom_queue(self): - prod = self.app.amqp.TaskProducer(Mock()) - self.app.amqp.queues['some_queue'] = Queue( - 'xxx', Exchange('yyy'), 'zzz', - ) - prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - prod.publish_task('tasks.add', (8, 8), {}, retry=False, - queue='some_queue') - self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy') - self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz') - - def test_publish_with_countdown(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - now = datetime.datetime(2013, 11, 26, 16, 48, 46) - prod.publish_task('tasks.add', (1, 1), {}, retry=False, - countdown=10, now=now) - self.assertEqual( - prod.publish.call_args[0][0]['eta'], - '2013-11-26T16:48:56+00:00', - ) - - def test_publish_with_countdown_and_timezone(self): - # use timezone with fixed offset to be sure it won't be changed - self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120) - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - now = datetime.datetime(2013, 11, 26, 16, 48, 46) - prod.publish_task('tasks.add', (2, 2), {}, retry=False, - countdown=20, now=now) - self.assertEqual( - prod.publish.call_args[0][0]['eta'], - '2013-11-26T18:49:06+02:00', - ) - - def test_event_dispatcher(self): - prod = self.app.amqp.TaskProducer(Mock()) - self.assertTrue(prod.event_dispatcher) - self.assertFalse(prod.event_dispatcher.enabled) - - -class test_TaskConsumer(AppCase): - - def test_accept_content(self): - with self.app.pool.acquire(block=True) as conn: - self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json'] - self.assertEqual( - self.app.amqp.TaskConsumer(conn).accept, - set(['application/json']) - ) - self.assertEqual( - self.app.amqp.TaskConsumer(conn, accept=['json']).accept, - set(['application/json']), - ) - - -class test_compat_TaskPublisher(AppCase): - - def test_compat_exchange_is_string(self): - producer = TaskPublisher(exchange='foo', app=self.app) - self.assertIsInstance(producer.exchange, Exchange) - self.assertEqual(producer.exchange.name, 'foo') - self.assertEqual(producer.exchange.type, 'direct') - producer = TaskPublisher(exchange='foo', exchange_type='topic', - app=self.app) - self.assertEqual(producer.exchange.type, 'topic') - - def test_compat_exchange_is_Exchange(self): - producer = TaskPublisher(exchange=Exchange('foo'), app=self.app) - self.assertEqual(producer.exchange.name, 'foo') - - -class test_PublisherPool(AppCase): - - def test_setup_nolimit(self): - self.app.conf.BROKER_POOL_LIMIT = None - try: - delattr(self.app, '_pool') - except AttributeError: - pass - self.app.amqp._producer_pool = None - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertFalse(pool._resource.queue) - - r1 = pool.acquire() - r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() - - def test_setup(self): - self.app.conf.BROKER_POOL_LIMIT = 2 - try: - delattr(self.app, '_pool') - except AttributeError: - pass - self.app.amqp._producer_pool = None - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertTrue(pool._resource.queue) - - p1 = r1 = pool.acquire() - p2 = r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() - self.assertIs(p2, r1) - self.assertIs(p1, r2) - r1.release() - r2.release() - - -class 
test_Queues(AppCase): - - def test_queues_format(self): - self.app.amqp.queues._consume_from = {} - self.assertEqual(self.app.amqp.queues.format(), '') - - def test_with_defaults(self): - self.assertEqual(Queues(None), {}) - - def test_add(self): - q = Queues() - q.add('foo', exchange='ex', routing_key='rk') - self.assertIn('foo', q) - self.assertIsInstance(q['foo'], Queue) - self.assertEqual(q['foo'].routing_key, 'rk') - - def test_with_ha_policy(self): - qn = Queues(ha_policy=None, create_missing=False) - qn.add('xyz') - self.assertIsNone(qn['xyz'].queue_arguments) - - qn.add('xyx', queue_arguments={'x-foo': 'bar'}) - self.assertEqual(qn['xyx'].queue_arguments, {'x-foo': 'bar'}) - - q = Queues(ha_policy='all', create_missing=False) - q.add(Queue('foo')) - self.assertEqual(q['foo'].queue_arguments, {'x-ha-policy': 'all'}) - - qq = Queue('xyx2', queue_arguments={'x-foo': 'bari'}) - q.add(qq) - self.assertEqual(q['xyx2'].queue_arguments, { - 'x-ha-policy': 'all', - 'x-foo': 'bari', - }) - - q2 = Queues(ha_policy=['A', 'B', 'C'], create_missing=False) - q2.add(Queue('foo')) - self.assertEqual(q2['foo'].queue_arguments, { - 'x-ha-policy': 'nodes', - 'x-ha-policy-params': ['A', 'B', 'C'], - }) - - def test_select_add(self): - q = Queues() - q.select(['foo', 'bar']) - q.select_add('baz') - self.assertItemsEqual(keys(q._consume_from), ['foo', 'bar', 'baz']) - - def test_deselect(self): - q = Queues() - q.select(['foo', 'bar']) - q.deselect('bar') - self.assertItemsEqual(keys(q._consume_from), ['foo']) - - def test_with_ha_policy_compat(self): - q = Queues(ha_policy='all') - q.add('bar') - self.assertEqual(q['bar'].queue_arguments, {'x-ha-policy': 'all'}) - - def test_add_default_exchange(self): - ex = Exchange('fff', 'fanout') - q = Queues(default_exchange=ex) - q.add(Queue('foo')) - self.assertEqual(q['foo'].exchange, ex) - - def test_alias(self): - q = Queues() - q.add(Queue('foo', alias='barfoo')) - self.assertIs(q['barfoo'], q['foo']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py deleted file mode 100644 index 559f5cb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import absolute_import - -from celery.app.annotations import MapAnnotation, prepare -from celery.utils.imports import qualname - -from celery.tests.case import AppCase - - -class MyAnnotation(object): - foo = 65 - - -class AnnotationCase(AppCase): - - def setup(self): - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @self.app.task(shared=False) - def mul(x, y): - return x * y - self.mul = mul - - -class test_MapAnnotation(AnnotationCase): - - def test_annotate(self): - x = MapAnnotation({self.add.name: {'foo': 1}}) - self.assertDictEqual(x.annotate(self.add), {'foo': 1}) - self.assertIsNone(x.annotate(self.mul)) - - def test_annotate_any(self): - x = MapAnnotation({'*': {'foo': 2}}) - self.assertDictEqual(x.annotate_any(), {'foo': 2}) - - x = MapAnnotation() - self.assertIsNone(x.annotate_any()) - - -class test_prepare(AnnotationCase): - - def test_dict_to_MapAnnotation(self): - x = prepare({self.add.name: {'foo': 3}}) - self.assertIsInstance(x[0], MapAnnotation) - - def test_returns_list(self): - self.assertListEqual(prepare(1), [1]) - self.assertListEqual(prepare([1]), [1]) - self.assertListEqual(prepare((1, )), [1]) - self.assertEqual(prepare(None), ()) - - def 
test_evalutes_qualnames(self): - self.assertEqual(prepare(qualname(MyAnnotation))[0]().foo, 65) - self.assertEqual(prepare([qualname(MyAnnotation)])[0]().foo, 65) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py deleted file mode 100644 index 9d260c6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py +++ /dev/null @@ -1,726 +0,0 @@ -from __future__ import absolute_import - -import gc -import os -import itertools - -from copy import deepcopy -from pickle import loads, dumps - -from amqp import promise -from kombu import Exchange - -from celery import shared_task, current_app -from celery import app as _app -from celery import _state -from celery.app import base as _appbase -from celery.app import defaults -from celery.exceptions import ImproperlyConfigured -from celery.five import items -from celery.loaders.base import BaseLoader -from celery.platforms import pyimplementation -from celery.utils.serialization import pickle - -from celery.tests.case import ( - CELERY_TEST_CONFIG, - AppCase, - Mock, - depends_on_current_app, - mask_modules, - patch, - platform_pyimp, - sys_platform, - pypy_version, - with_environ, -) -from celery.utils import uuid -from celery.utils.mail import ErrorMail - -THIS_IS_A_KEY = 'this is a value' - - -class ObjectConfig(object): - FOO = 1 - BAR = 2 - -object_config = ObjectConfig() -dict_config = dict(FOO=10, BAR=20) - - -class ObjectConfig2(object): - LEAVE_FOR_WORK = True - MOMENT_TO_STOP = True - CALL_ME_BACK = 123456789 - WANT_ME_TO = False - UNDERSTAND_ME = True - - -class Object(object): - - def __init__(self, **kwargs): - for key, value in items(kwargs): - setattr(self, key, value) - - -def _get_test_config(): - return deepcopy(CELERY_TEST_CONFIG) -test_config = _get_test_config() - - -class test_module(AppCase): - - def test_default_app(self): - self.assertEqual(_app.default_app, _state.default_app) - - def test_bugreport(self): - self.assertTrue(_app.bugreport(app=self.app)) - - -class test_App(AppCase): - - def setup(self): - self.app.add_defaults(test_config) - - def test_task_autofinalize_disabled(self): - with self.Celery('xyzibari', autofinalize=False) as app: - @app.task - def ttafd(): - return 42 - - with self.assertRaises(RuntimeError): - ttafd() - - with self.Celery('xyzibari', autofinalize=False) as app: - @app.task - def ttafd2(): - return 42 - - app.finalize() - self.assertEqual(ttafd2(), 42) - - def test_registry_autofinalize_disabled(self): - with self.Celery('xyzibari', autofinalize=False) as app: - with self.assertRaises(RuntimeError): - app.tasks['celery.chain'] - app.finalize() - self.assertTrue(app.tasks['celery.chain']) - - def test_task(self): - with self.Celery('foozibari') as app: - - def fun(): - pass - - fun.__module__ = '__main__' - task = app.task(fun) - self.assertEqual(task.name, app.main + '.fun') - - def test_with_config_source(self): - with self.Celery(config_source=ObjectConfig) as app: - self.assertEqual(app.conf.FOO, 1) - self.assertEqual(app.conf.BAR, 2) - - @depends_on_current_app - def test_task_windows_execv(self): - prev, _appbase._EXECV = _appbase._EXECV, True - try: - - @self.app.task(shared=False) - def foo(): - pass - - self.assertTrue(foo._get_current_object()) # is proxy - - finally: - _appbase._EXECV = prev - assert not _appbase._EXECV - - def test_task_takes_no_args(self): - with self.assertRaises(TypeError): - @self.app.task(1) - def foo(): - pass - - def test_add_defaults(self): 
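# add_defaults() is lazy: the source is queued in _pending_defaults and only
# folded into app.conf on first attribute access, as the assertions below check.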
- self.assertFalse(self.app.configured) - _conf = {'FOO': 300} - - def conf(): - return _conf - - self.app.add_defaults(conf) - self.assertIn(conf, self.app._pending_defaults) - self.assertFalse(self.app.configured) - self.assertEqual(self.app.conf.FOO, 300) - self.assertTrue(self.app.configured) - self.assertFalse(self.app._pending_defaults) - - # defaults not pickled - appr = loads(dumps(self.app)) - with self.assertRaises(AttributeError): - appr.conf.FOO - - # add more defaults after configured - conf2 = {'FOO': 'BAR'} - self.app.add_defaults(conf2) - self.assertEqual(self.app.conf.FOO, 'BAR') - - self.assertIn(_conf, self.app.conf.defaults) - self.assertIn(conf2, self.app.conf.defaults) - - def test_connection_or_acquire(self): - with self.app.connection_or_acquire(block=True): - self.assertTrue(self.app.pool._dirty) - - with self.app.connection_or_acquire(pool=False): - self.assertFalse(self.app.pool._dirty) - - def test_maybe_close_pool(self): - cpool = self.app._pool = Mock() - amqp = self.app.__dict__['amqp'] = Mock() - ppool = amqp._producer_pool - self.app._maybe_close_pool() - cpool.force_close_all.assert_called_with() - ppool.force_close_all.assert_called_with() - self.assertIsNone(self.app._pool) - self.assertIsNone(self.app.__dict__['amqp']._producer_pool) - - self.app._pool = Mock() - self.app._maybe_close_pool() - self.app._maybe_close_pool() - - def test_using_v1_reduce(self): - self.app._using_v1_reduce = True - self.assertTrue(loads(dumps(self.app))) - - def test_autodiscover_tasks_force(self): - self.app.loader.autodiscover_tasks = Mock() - self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True) - self.app.loader.autodiscover_tasks.assert_called_with( - ['proj.A', 'proj.B'], 'tasks', - ) - self.app.loader.autodiscover_tasks = Mock() - self.app.autodiscover_tasks( - lambda: ['proj.A', 'proj.B'], - related_name='george', - force=True, - ) - self.app.loader.autodiscover_tasks.assert_called_with( - ['proj.A', 'proj.B'], 'george', - ) - - def test_autodiscover_tasks_lazy(self): - with patch('celery.signals.import_modules') as import_modules: - - def packages(): - return [1, 2, 3] - - self.app.autodiscover_tasks(packages) - self.assertTrue(import_modules.connect.called) - prom = import_modules.connect.call_args[0][0] - self.assertIsInstance(prom, promise) - self.assertEqual(prom.fun, self.app._autodiscover_tasks) - self.assertEqual(prom.args[0](), [1, 2, 3]) - - @with_environ('CELERY_BROKER_URL', '') - def test_with_broker(self): - with self.Celery(broker='foo://baribaz') as app: - self.assertEqual(app.conf.BROKER_URL, 'foo://baribaz') - - def test_repr(self): - self.assertTrue(repr(self.app)) - - def test_custom_task_registry(self): - with self.Celery(tasks=self.app.tasks) as app2: - self.assertIs(app2.tasks, self.app.tasks) - - def test_include_argument(self): - with self.Celery(include=('foo', 'bar.foo')) as app: - self.assertEqual(app.conf.CELERY_IMPORTS, ('foo', 'bar.foo')) - - def test_set_as_current(self): - current = _state._tls.current_app - try: - app = self.Celery(set_as_current=True) - self.assertIs(_state._tls.current_app, app) - finally: - _state._tls.current_app = current - - def test_current_task(self): - @self.app.task - def foo(shared=False): - pass - - _state._task_stack.push(foo) - try: - self.assertEqual(self.app.current_task.name, foo.name) - finally: - _state._task_stack.pop() - - def test_task_not_shared(self): - with patch('celery.app.base.connect_on_app_finalize') as sh: - @self.app.task(shared=False) - def foo(): - pass - 
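# with shared=False the task must never be registered via connect_on_app_finalize: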
self.assertFalse(sh.called) - - def test_task_compat_with_filter(self): - with self.Celery(accept_magic_kwargs=True) as app: - check = Mock() - - def filter(task): - check(task) - return task - - @app.task(filter=filter, shared=False) - def foo(): - pass - check.assert_called_with(foo) - - def test_task_with_filter(self): - with self.Celery(accept_magic_kwargs=False) as app: - check = Mock() - - def filter(task): - check(task) - return task - - assert not _appbase._EXECV - - @app.task(filter=filter, shared=False) - def foo(): - pass - check.assert_called_with(foo) - - def test_task_sets_main_name_MP_MAIN_FILE(self): - from celery import utils as _utils - _utils.MP_MAIN_FILE = __file__ - try: - with self.Celery('xuzzy') as app: - - @app.task - def foo(): - pass - - self.assertEqual(foo.name, 'xuzzy.foo') - finally: - _utils.MP_MAIN_FILE = None - - def test_annotate_decorator(self): - from celery.app.task import Task - - class adX(Task): - abstract = True - - def run(self, y, z, x): - return y, z, x - - check = Mock() - - def deco(fun): - - def _inner(*args, **kwargs): - check(*args, **kwargs) - return fun(*args, **kwargs) - return _inner - - self.app.conf.CELERY_ANNOTATIONS = { - adX.name: {'@__call__': deco} - } - adX.bind(self.app) - self.assertIs(adX.app, self.app) - - i = adX() - i(2, 4, x=3) - check.assert_called_with(i, 2, 4, x=3) - - i.annotate() - i.annotate() - - def test_apply_async_has__self__(self): - @self.app.task(__self__='hello', shared=False) - def aawsX(): - pass - - with patch('celery.app.amqp.TaskProducer.publish_task') as dt: - aawsX.apply_async((4, 5)) - args = dt.call_args[0][1] - self.assertEqual(args, ('hello', 4, 5)) - - def test_apply_async_adds_children(self): - from celery._state import _task_stack - - @self.app.task(shared=False) - def a3cX1(self): - pass - - @self.app.task(shared=False) - def a3cX2(self): - pass - - _task_stack.push(a3cX1) - try: - a3cX1.push_request(called_directly=False) - try: - res = a3cX2.apply_async(add_to_parent=True) - self.assertIn(res, a3cX1.request.children) - finally: - a3cX1.pop_request() - finally: - _task_stack.pop() - - def test_pickle_app(self): - changes = dict(THE_FOO_BAR='bars', - THE_MII_MAR='jars') - self.app.conf.update(changes) - saved = pickle.dumps(self.app) - self.assertLess(len(saved), 2048) - restored = pickle.loads(saved) - self.assertDictContainsSubset(changes, restored.conf) - - def test_worker_main(self): - from celery.bin import worker as worker_bin - - class worker(worker_bin.worker): - - def execute_from_commandline(self, argv): - return argv - - prev, worker_bin.worker = worker_bin.worker, worker - try: - ret = self.app.worker_main(argv=['--version']) - self.assertListEqual(ret, ['--version']) - finally: - worker_bin.worker = prev - - def test_config_from_envvar(self): - os.environ['CELERYTEST_CONFIG_OBJECT'] = 'celery.tests.app.test_app' - self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT') - self.assertEqual(self.app.conf.THIS_IS_A_KEY, 'this is a value') - - def assert_config2(self): - self.assertTrue(self.app.conf.LEAVE_FOR_WORK) - self.assertTrue(self.app.conf.MOMENT_TO_STOP) - self.assertEqual(self.app.conf.CALL_ME_BACK, 123456789) - self.assertFalse(self.app.conf.WANT_ME_TO) - self.assertTrue(self.app.conf.UNDERSTAND_ME) - - def test_config_from_object__lazy(self): - conf = ObjectConfig2() - self.app.config_from_object(conf) - self.assertFalse(self.app.loader._conf) - self.assertIs(self.app._config_source, conf) - - self.assert_config2() - - def test_config_from_object__force(self): - 
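# force=True evaluates the config source immediately instead of lazily: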
self.app.config_from_object(ObjectConfig2(), force=True) - self.assertTrue(self.app.loader._conf) - - self.assert_config2() - - def test_config_from_cmdline(self): - cmdline = ['.always_eager=no', - '.result_backend=/dev/null', - 'celeryd.prefetch_multiplier=368', - '.foobarstring=(string)300', - '.foobarint=(int)300', - '.result_engine_options=(dict){"foo": "bar"}'] - self.app.config_from_cmdline(cmdline, namespace='celery') - self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER) - self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, '/dev/null') - self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368) - self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, '300') - self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300) - self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS, - {'foo': 'bar'}) - - def test_compat_setting_CELERY_BACKEND(self): - self.app._preconf = {} # removes result backend set by AppCase - self.app.config_from_object(Object(CELERY_BACKEND='set_by_us')) - self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, 'set_by_us') - - def test_setting_BROKER_TRANSPORT_OPTIONS(self): - - _args = {'foo': 'bar', 'spam': 'baz'} - - self.app.config_from_object(Object()) - self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, {}) - - self.app.config_from_object(Object(BROKER_TRANSPORT_OPTIONS=_args)) - self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, _args) - - def test_Windows_log_color_disabled(self): - self.app.IS_WINDOWS = True - self.assertFalse(self.app.log.supports_color(True)) - - def test_compat_setting_CARROT_BACKEND(self): - self.app.config_from_object(Object(CARROT_BACKEND='set_by_us')) - self.assertEqual(self.app.conf.BROKER_TRANSPORT, 'set_by_us') - - def test_WorkController(self): - x = self.app.WorkController - self.assertIs(x.app, self.app) - - def test_Worker(self): - x = self.app.Worker - self.assertIs(x.app, self.app) - - @depends_on_current_app - def test_AsyncResult(self): - x = self.app.AsyncResult('1') - self.assertIs(x.app, self.app) - r = loads(dumps(x)) - # not set as current, so ends up as default app after reduce - self.assertIs(r.app, current_app._get_current_object()) - - def test_get_active_apps(self): - self.assertTrue(list(_state._get_active_apps())) - - app1 = self.Celery() - appid = id(app1) - self.assertIn(app1, _state._get_active_apps()) - app1.close() - del(app1) - - gc.collect() - - # weakref removed from list when app goes out of scope. 
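# so searching the active-apps weakset for its id should now come up empty: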
- with self.assertRaises(StopIteration): - next(app for app in _state._get_active_apps() if id(app) == appid) - - def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): - self.assertFalse( - self.app.config_from_envvar( - 'HDSAJIHWIQHEWQU', force=True, silent=True), - ) - with self.assertRaises(ImproperlyConfigured): - self.app.config_from_envvar( - 'HDSAJIHWIQHEWQU', force=True, silent=False, - ) - os.environ[key] = __name__ + '.object_config' - self.assertTrue(self.app.config_from_envvar(key, force=True)) - self.assertEqual(self.app.conf['FOO'], 1) - self.assertEqual(self.app.conf['BAR'], 2) - - os.environ[key] = 'unknown_asdwqe.asdwqewqe' - with self.assertRaises(ImportError): - self.app.config_from_envvar(key, silent=False) - self.assertFalse( - self.app.config_from_envvar(key, force=True, silent=True), - ) - - os.environ[key] = __name__ + '.dict_config' - self.assertTrue(self.app.config_from_envvar(key, force=True)) - self.assertEqual(self.app.conf['FOO'], 10) - self.assertEqual(self.app.conf['BAR'], 20) - - @patch('celery.bin.celery.CeleryCommand.execute_from_commandline') - def test_start(self, execute): - self.app.start() - self.assertTrue(execute.called) - - def test_mail_admins(self): - - class Loader(BaseLoader): - - def mail_admins(*args, **kwargs): - return args, kwargs - - self.app.loader = Loader(app=self.app) - self.app.conf.ADMINS = None - self.assertFalse(self.app.mail_admins('Subject', 'Body')) - self.app.conf.ADMINS = [('George Costanza', 'george@vandelay.com')] - self.assertTrue(self.app.mail_admins('Subject', 'Body')) - - def test_amqp_get_broker_info(self): - self.assertDictContainsSubset( - {'hostname': 'localhost', - 'userid': 'guest', - 'password': 'guest', - 'virtual_host': '/'}, - self.app.connection('pyamqp://').info(), - ) - self.app.conf.BROKER_PORT = 1978 - self.app.conf.BROKER_VHOST = 'foo' - self.assertDictContainsSubset( - {'port': 1978, 'virtual_host': 'foo'}, - self.app.connection('pyamqp://:1978/foo').info(), - ) - conn = self.app.connection('pyamqp:////value') - self.assertDictContainsSubset({'virtual_host': '/value'}, - conn.info()) - - def test_amqp_failover_strategy_selection(self): - # Test passing in a string and make sure the string - # gets there untouched - self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar' - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - 'foo-bar', - ) - - # Try passing in None - self.app.conf.BROKER_FAILOVER_STRATEGY = None - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - itertools.cycle, - ) - - # Test passing in a method - def my_failover_strategy(it): - yield True - - self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - my_failover_strategy, - ) - - def test_BROKER_BACKEND_alias(self): - self.assertEqual(self.app.conf.BROKER_BACKEND, - self.app.conf.BROKER_TRANSPORT) - - def test_after_fork(self): - p = self.app._pool = Mock() - self.app._after_fork(self.app) - p.force_close_all.assert_called_with() - self.assertIsNone(self.app._pool) - self.app._after_fork(self.app) - - def test_pool_no_multiprocessing(self): - with mask_modules('multiprocessing.util'): - pool = self.app.pool - self.assertIs(pool, self.app._pool) - - def test_bugreport(self): - self.assertTrue(self.app.bugreport()) - - def test_send_task_sent_event(self): - - class Dispatcher(object): - sent = [] - - def publish(self, type, fields, *args, **kwargs): - self.sent.append((type, fields)) - - 
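# declare the exchanges/queues up front on the in-memory test broker: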
conn = self.app.connection() - chan = conn.channel() - try: - for e in ('foo_exchange', 'moo_exchange', 'bar_exchange'): - chan.exchange_declare(e, 'direct', durable=True) - chan.queue_declare(e, durable=True) - chan.queue_bind(e, e, e) - finally: - chan.close() - assert conn.transport_cls == 'memory' - - prod = self.app.amqp.TaskProducer( - conn, exchange=Exchange('foo_exchange'), - send_sent_event=True, - ) - - dispatcher = Dispatcher() - self.assertTrue(prod.publish_task('footask', (), {}, - exchange='moo_exchange', - routing_key='moo_exchange', - event_dispatcher=dispatcher)) - self.assertTrue(dispatcher.sent) - self.assertEqual(dispatcher.sent[0][0], 'task-sent') - self.assertTrue(prod.publish_task('footask', (), {}, - event_dispatcher=dispatcher, - exchange='bar_exchange', - routing_key='bar_exchange')) - - def test_error_mail_sender(self): - x = ErrorMail.subject % {'name': 'task_name', - 'id': uuid(), - 'exc': 'FOOBARBAZ', - 'hostname': 'lana'} - self.assertTrue(x) - - def test_error_mail_disabled(self): - task = Mock() - x = ErrorMail(task) - x.should_send = Mock() - x.should_send.return_value = False - x.send(Mock(), Mock()) - self.assertFalse(task.app.mail_admins.called) - - -class test_defaults(AppCase): - - def test_strtobool(self): - for s in ('false', 'no', '0'): - self.assertFalse(defaults.strtobool(s)) - for s in ('true', 'yes', '1'): - self.assertTrue(defaults.strtobool(s)) - with self.assertRaises(TypeError): - defaults.strtobool('unsure') - - -class test_debugging_utils(AppCase): - - def test_enable_disable_trace(self): - try: - _app.enable_trace() - self.assertEqual(_app.app_or_default, _app._app_or_default_trace) - _app.disable_trace() - self.assertEqual(_app.app_or_default, _app._app_or_default) - finally: - _app.disable_trace() - - -class test_pyimplementation(AppCase): - - def test_platform_python_implementation(self): - with platform_pyimp(lambda: 'Xython'): - self.assertEqual(pyimplementation(), 'Xython') - - def test_platform_jython(self): - with platform_pyimp(): - with sys_platform('java 1.6.51'): - self.assertIn('Jython', pyimplementation()) - - def test_platform_pypy(self): - with platform_pyimp(): - with sys_platform('darwin'): - with pypy_version((1, 4, 3)): - self.assertIn('PyPy', pyimplementation()) - with pypy_version((1, 4, 3, 'a4')): - self.assertIn('PyPy', pyimplementation()) - - def test_platform_fallback(self): - with platform_pyimp(): - with sys_platform('darwin'): - with pypy_version(): - self.assertEqual('CPython', pyimplementation()) - - -class test_shared_task(AppCase): - - def test_registers_to_all_apps(self): - with self.Celery('xproj', set_as_current=True) as xproj: - xproj.finalize() - - @shared_task - def foo(): - return 42 - - @shared_task() - def bar(): - return 84 - - self.assertIs(foo.app, xproj) - self.assertIs(bar.app, xproj) - self.assertTrue(foo._get_current_object()) - - with self.Celery('yproj', set_as_current=True) as yproj: - self.assertIs(foo.app, yproj) - self.assertIs(bar.app, yproj) - - @shared_task() - def baz(): - return 168 - - self.assertIs(baz.app, yproj) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py deleted file mode 100644 index 67e4f53..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py +++ /dev/null @@ -1,539 +0,0 @@ -from __future__ import absolute_import - -import errno - -from datetime import datetime, timedelta -from pickle import dumps, loads - -from celery import beat -from 
celery.five import keys, string_t -from celery.schedules import schedule -from celery.utils import uuid -from celery.tests.case import AppCase, Mock, SkipTest, call, patch - - -class Object(object): - pass - - -class MockShelve(dict): - closed = False - synced = False - - def close(self): - self.closed = True - - def sync(self): - self.synced = True - - -class MockService(object): - started = False - stopped = False - - def __init__(self, *args, **kwargs): - pass - - def start(self, **kwargs): - self.started = True - - def stop(self, **kwargs): - self.stopped = True - - -class test_ScheduleEntry(AppCase): - Entry = beat.ScheduleEntry - - def create_entry(self, **kwargs): - entry = dict( - name='celery.unittest.add', - schedule=timedelta(seconds=10), - args=(2, 2), - options={'routing_key': 'cpu'}, - app=self.app, - ) - return self.Entry(**dict(entry, **kwargs)) - - def test_next(self): - entry = self.create_entry(schedule=10) - self.assertTrue(entry.last_run_at) - self.assertIsInstance(entry.last_run_at, datetime) - self.assertEqual(entry.total_run_count, 0) - - next_run_at = entry.last_run_at + timedelta(seconds=10) - next_entry = entry.next(next_run_at) - self.assertGreaterEqual(next_entry.last_run_at, next_run_at) - self.assertEqual(next_entry.total_run_count, 1) - - def test_is_due(self): - entry = self.create_entry(schedule=timedelta(seconds=10)) - self.assertIs(entry.app, self.app) - self.assertIs(entry.schedule.app, self.app) - due1, next_time_to_run1 = entry.is_due() - self.assertFalse(due1) - self.assertGreater(next_time_to_run1, 9) - - next_run_at = entry.last_run_at - timedelta(seconds=10) - next_entry = entry.next(next_run_at) - due2, next_time_to_run2 = next_entry.is_due() - self.assertTrue(due2) - self.assertGreater(next_time_to_run2, 9) - - def test_repr(self): - entry = self.create_entry() - self.assertIn(' 1: - return s.sh - raise OSError() - opens.side_effect = effect - s.setup_schedule() - s._remove_db.assert_called_with() - - s._store = {'__version__': 1} - s.setup_schedule() - - s._store.clear = Mock() - op = s.persistence.open = Mock() - op.return_value = s._store - s._store['tz'] = 'FUNKY' - s.setup_schedule() - op.assert_called_with(s.schedule_filename, writeback=True) - s._store.clear.assert_called_with() - s._store['utc_enabled'] = False - s._store.clear = Mock() - s.setup_schedule() - s._store.clear.assert_called_with() - - def test_get_schedule(self): - s = create_persistent_scheduler()[0]( - schedule_filename='schedule', app=self.app, - ) - s._store = {'entries': {}} - s.schedule = {'foo': 'bar'} - self.assertDictEqual(s.schedule, {'foo': 'bar'}) - self.assertDictEqual(s._store['entries'], s.schedule) - - -class test_Service(AppCase): - - def get_service(self): - Scheduler, mock_shelve = create_persistent_scheduler() - return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve - - def test_pickleable(self): - s = beat.Service(app=self.app, scheduler_cls=Mock) - self.assertTrue(loads(dumps(s))) - - def test_start(self): - s, sh = self.get_service() - schedule = s.scheduler.schedule - self.assertIsInstance(schedule, dict) - self.assertIsInstance(s.scheduler, beat.Scheduler) - scheduled = list(schedule.keys()) - for task_name in keys(sh['entries']): - self.assertIn(task_name, scheduled) - - s.sync() - self.assertTrue(sh.closed) - self.assertTrue(sh.synced) - self.assertTrue(s._is_stopped.isSet()) - s.sync() - s.stop(wait=False) - self.assertTrue(s._is_shutdown.isSet()) - s.stop(wait=True) - self.assertTrue(s._is_shutdown.isSet()) - - p = 
s.scheduler._store - s.scheduler._store = None - try: - s.scheduler.sync() - finally: - s.scheduler._store = p - - def test_start_embedded_process(self): - s, sh = self.get_service() - s._is_shutdown.set() - s.start(embedded_process=True) - - def test_start_thread(self): - s, sh = self.get_service() - s._is_shutdown.set() - s.start(embedded_process=False) - - def test_start_tick_raises_exit_error(self): - s, sh = self.get_service() - s.scheduler.tick_raises_exit = True - s.start() - self.assertTrue(s._is_shutdown.isSet()) - - def test_start_manages_one_tick_before_shutdown(self): - s, sh = self.get_service() - s.scheduler.shutdown_service = s - s.start() - self.assertTrue(s._is_shutdown.isSet()) - - -class test_EmbeddedService(AppCase): - - def test_start_stop_process(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('multiprocessing not available') - - from billiard.process import Process - - s = beat.EmbeddedService(self.app) - self.assertIsInstance(s, Process) - self.assertIsInstance(s.service, beat.Service) - s.service = MockService() - - class _Popen(object): - terminated = False - - def terminate(self): - self.terminated = True - - with patch('celery.platforms.close_open_fds'): - s.run() - self.assertTrue(s.service.started) - - s._popen = _Popen() - s.stop() - self.assertTrue(s.service.stopped) - self.assertTrue(s._popen.terminated) - - def test_start_stop_threaded(self): - s = beat.EmbeddedService(self.app, thread=True) - from threading import Thread - self.assertIsInstance(s, Thread) - self.assertIsInstance(s.service, beat.Service) - s.service = MockService() - - s.run() - self.assertTrue(s.service.started) - - s.stop() - self.assertTrue(s.service.stopped) - - -class test_schedule(AppCase): - - def test_maybe_make_aware(self): - x = schedule(10, app=self.app) - x.utc_enabled = True - d = x.maybe_make_aware(datetime.utcnow()) - self.assertTrue(d.tzinfo) - x.utc_enabled = False - d2 = x.maybe_make_aware(datetime.utcnow()) - self.assertIsNone(d2.tzinfo) - - def test_to_local(self): - x = schedule(10, app=self.app) - x.utc_enabled = True - d = x.to_local(datetime.utcnow()) - self.assertIsNone(d.tzinfo) - x.utc_enabled = False - d = x.to_local(datetime.utcnow()) - self.assertTrue(d.tzinfo) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py deleted file mode 100644 index 0d04a52..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import absolute_import - -from celery import group, chord -from celery.app import builtins -from celery.canvas import Signature -from celery.five import range -from celery._state import _task_stack -from celery.tests.case import AppCase, Mock, patch - - -class BuiltinsCase(AppCase): - - def setup(self): - @self.app.task(shared=False) - def xsum(x): - return sum(x) - self.xsum = xsum - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - -class test_backend_cleanup(BuiltinsCase): - - def test_run(self): - self.app.backend.cleanup = Mock() - self.app.backend.cleanup.__name__ = 'cleanup' - cleanup_task = builtins.add_backend_cleanup_task(self.app) - cleanup_task() - self.assertTrue(self.app.backend.cleanup.called) - - -class test_map(BuiltinsCase): - - def test_run(self): - - @self.app.task(shared=False) - def map_mul(x): - return x[0] * x[1] - - res = self.app.tasks['celery.map']( - map_mul, [(2, 2), (4, 4), 
(8, 8)], - ) - self.assertEqual(res, [4, 16, 64]) - - -class test_starmap(BuiltinsCase): - - def test_run(self): - - @self.app.task(shared=False) - def smap_mul(x, y): - return x * y - - res = self.app.tasks['celery.starmap']( - smap_mul, [(2, 2), (4, 4), (8, 8)], - ) - self.assertEqual(res, [4, 16, 64]) - - -class test_chunks(BuiltinsCase): - - @patch('celery.canvas.chunks.apply_chunks') - def test_run(self, apply_chunks): - - @self.app.task(shared=False) - def chunks_mul(l): - return l - - self.app.tasks['celery.chunks']( - chunks_mul, [(2, 2), (4, 4), (8, 8)], 1, - ) - self.assertTrue(apply_chunks.called) - - -class test_group(BuiltinsCase): - - def setup(self): - self.task = builtins.add_group_task(self.app)() - super(test_group, self).setup() - - def test_apply_async_eager(self): - self.task.apply = Mock() - self.app.conf.CELERY_ALWAYS_EAGER = True - self.task.apply_async() - self.assertTrue(self.task.apply.called) - - def test_apply(self): - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - x.name = self.task.name - res = x.apply() - self.assertEqual(res.get(), [8, 16]) - - def test_apply_async(self): - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - x.apply_async() - - def test_apply_empty(self): - x = group(app=self.app) - x.apply() - res = x.apply_async() - self.assertTrue(res) - self.assertFalse(res.results) - - def test_apply_async_with_parent(self): - _task_stack.push(self.add) - try: - self.add.push_request(called_directly=False) - try: - assert not self.add.request.children - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - res = x() - self.assertTrue(self.add.request.children) - self.assertIn(res, self.add.request.children) - self.assertEqual(len(self.add.request.children), 1) - finally: - self.add.pop_request() - finally: - _task_stack.pop() - - -class test_chain(BuiltinsCase): - - def setup(self): - BuiltinsCase.setup(self) - self.task = builtins.add_chain_task(self.app)() - - def test_apply_async(self): - c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) - result = c.apply_async() - self.assertTrue(result.parent) - self.assertTrue(result.parent.parent) - self.assertIsNone(result.parent.parent.parent) - - def test_group_to_chord(self): - c = ( - group(self.add.s(i, i) for i in range(5)) | - self.add.s(10) | - self.add.s(20) | - self.add.s(30) - ) - tasks, _ = c.type.prepare_steps((), c.tasks) - self.assertIsInstance(tasks[0], chord) - self.assertTrue(tasks[0].body.options['link']) - self.assertTrue(tasks[0].body.options['link'][0].options['link']) - - c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) - tasks2, _ = c2.type.prepare_steps((), c2.tasks) - self.assertIsInstance(tasks2[1], group) - - def test_apply_options(self): - - class static(Signature): - - def clone(self, *args, **kwargs): - return self - - def s(*args, **kwargs): - return static(self.add, args, kwargs, type=self.add) - - c = s(2, 2) | s(4, 4) | s(8, 8) - r1 = c.apply_async(task_id='some_id') - self.assertEqual(r1.id, 'some_id') - - c.apply_async(group_id='some_group_id') - self.assertEqual(c.tasks[-1].options['group_id'], 'some_group_id') - - c.apply_async(chord='some_chord_id') - self.assertEqual(c.tasks[-1].options['chord'], 'some_chord_id') - - c.apply_async(link=[s(32)]) - self.assertListEqual(c.tasks[-1].options['link'], [s(32)]) - - c.apply_async(link_error=[s('error')]) - for task in c.tasks: - self.assertListEqual(task.options['link_error'], [s('error')]) - - -class test_chord(BuiltinsCase): - - def setup(self): - self.task = builtins.add_chord_task(self.app)() - 
super(test_chord, self).setup() - - def test_apply_async(self): - x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) - r = x.apply_async() - self.assertTrue(r) - self.assertTrue(r.parent) - - def test_run_header_not_group(self): - self.task([self.add.s(i, i) for i in range(10)], self.xsum.s()) - - def test_forward_options(self): - body = self.xsum.s() - x = chord([self.add.s(i, i) for i in range(10)], body=body) - x._type = Mock() - x._type.app.conf.CELERY_ALWAYS_EAGER = False - x.apply_async(group_id='some_group_id') - self.assertTrue(x._type.called) - resbody = x._type.call_args[0][1] - self.assertEqual(resbody.options['group_id'], 'some_group_id') - x2 = chord([self.add.s(i, i) for i in range(10)], body=body) - x2._type = Mock() - x2._type.app.conf.CELERY_ALWAYS_EAGER = False - x2.apply_async(chord='some_chord_id') - self.assertTrue(x2._type.called) - resbody = x2._type.call_args[0][1] - self.assertEqual(resbody.options['chord'], 'some_chord_id') - - def test_apply_eager(self): - self.app.conf.CELERY_ALWAYS_EAGER = True - x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) - r = x.apply_async() - self.assertEqual(r.get(), 90) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py deleted file mode 100644 index 5088d35..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import absolute_import -from celery.tests.case import AppCase - -import celery - - -class test_celery_package(AppCase): - - def test_version(self): - self.assertTrue(celery.VERSION) - self.assertGreaterEqual(len(celery.VERSION), 3) - celery.VERSION = (0, 3, 0) - self.assertGreaterEqual(celery.__version__.count('.'), 2) - - def test_meta(self): - for m in ('__author__', '__contact__', '__homepage__', - '__docformat__'): - self.assertTrue(getattr(celery, m, None)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py deleted file mode 100644 index 7a05506..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py +++ /dev/null @@ -1,251 +0,0 @@ -from __future__ import absolute_import - -from functools import wraps - -from kombu.pidbox import Mailbox - -from celery.app import control -from celery.exceptions import DuplicateNodenameWarning -from celery.utils import uuid -from celery.tests.case import AppCase - - -class MockMailbox(Mailbox): - sent = [] - - def _publish(self, command, *args, **kwargs): - self.__class__.sent.append(command) - - def close(self): - pass - - def _collect(self, *args, **kwargs): - pass - - -class Control(control.Control): - Mailbox = MockMailbox - - -def with_mock_broadcast(fun): - - @wraps(fun) - def _resets(*args, **kwargs): - MockMailbox.sent = [] - try: - return fun(*args, **kwargs) - finally: - MockMailbox.sent = [] - return _resets - - -class test_flatten_reply(AppCase): - - def test_flatten_reply(self): - reply = [ - {'foo@example.com': {'hello': 10}}, - {'foo@example.com': {'hello': 20}}, - {'bar@example.com': {'hello': 30}} - ] - with self.assertWarns(DuplicateNodenameWarning) as w: - nodes = control.flatten_reply(reply) - - self.assertIn( - 'Received multiple replies from node name: foo@example.com.', - str(w.warning) - ) - self.assertIn('foo@example.com', nodes) - self.assertIn('bar@example.com', nodes) - - -class test_inspect(AppCase): - - def 
setup(self): - self.c = Control(app=self.app) - self.prev, self.app.control = self.app.control, self.c - self.i = self.c.inspect() - - def test_prepare_reply(self): - self.assertDictEqual(self.i._prepare([{'w1': {'ok': 1}}, - {'w2': {'ok': 1}}]), - {'w1': {'ok': 1}, 'w2': {'ok': 1}}) - - i = self.c.inspect(destination='w1') - self.assertEqual(i._prepare([{'w1': {'ok': 1}}]), - {'ok': 1}) - - @with_mock_broadcast - def test_active(self): - self.i.active() - self.assertIn('dump_active', MockMailbox.sent) - - @with_mock_broadcast - def test_clock(self): - self.i.clock() - self.assertIn('clock', MockMailbox.sent) - - @with_mock_broadcast - def test_conf(self): - self.i.conf() - self.assertIn('dump_conf', MockMailbox.sent) - - @with_mock_broadcast - def test_hello(self): - self.i.hello('george@vandelay.com') - self.assertIn('hello', MockMailbox.sent) - - @with_mock_broadcast - def test_memsample(self): - self.i.memsample() - self.assertIn('memsample', MockMailbox.sent) - - @with_mock_broadcast - def test_memdump(self): - self.i.memdump() - self.assertIn('memdump', MockMailbox.sent) - - @with_mock_broadcast - def test_objgraph(self): - self.i.objgraph() - self.assertIn('objgraph', MockMailbox.sent) - - @with_mock_broadcast - def test_scheduled(self): - self.i.scheduled() - self.assertIn('dump_schedule', MockMailbox.sent) - - @with_mock_broadcast - def test_reserved(self): - self.i.reserved() - self.assertIn('dump_reserved', MockMailbox.sent) - - @with_mock_broadcast - def test_stats(self): - self.i.stats() - self.assertIn('stats', MockMailbox.sent) - - @with_mock_broadcast - def test_revoked(self): - self.i.revoked() - self.assertIn('dump_revoked', MockMailbox.sent) - - @with_mock_broadcast - def test_tasks(self): - self.i.registered() - self.assertIn('dump_tasks', MockMailbox.sent) - - @with_mock_broadcast - def test_ping(self): - self.i.ping() - self.assertIn('ping', MockMailbox.sent) - - @with_mock_broadcast - def test_active_queues(self): - self.i.active_queues() - self.assertIn('active_queues', MockMailbox.sent) - - @with_mock_broadcast - def test_report(self): - self.i.report() - self.assertIn('report', MockMailbox.sent) - - -class test_Broadcast(AppCase): - - def setup(self): - self.control = Control(app=self.app) - self.app.control = self.control - - @self.app.task(shared=False) - def mytask(): - pass - self.mytask = mytask - - def test_purge(self): - self.control.purge() - - @with_mock_broadcast - def test_broadcast(self): - self.control.broadcast('foobarbaz', arguments=[]) - self.assertIn('foobarbaz', MockMailbox.sent) - - @with_mock_broadcast - def test_broadcast_limit(self): - self.control.broadcast( - 'foobarbaz1', arguments=[], limit=None, destination=[1, 2, 3], - ) - self.assertIn('foobarbaz1', MockMailbox.sent) - - @with_mock_broadcast - def test_broadcast_validate(self): - with self.assertRaises(ValueError): - self.control.broadcast('foobarbaz2', - destination='foo') - - @with_mock_broadcast - def test_rate_limit(self): - self.control.rate_limit(self.mytask.name, '100/m') - self.assertIn('rate_limit', MockMailbox.sent) - - @with_mock_broadcast - def test_time_limit(self): - self.control.time_limit(self.mytask.name, soft=10, hard=20) - self.assertIn('time_limit', MockMailbox.sent) - - @with_mock_broadcast - def test_add_consumer(self): - self.control.add_consumer('foo') - self.assertIn('add_consumer', MockMailbox.sent) - - @with_mock_broadcast - def test_cancel_consumer(self): - self.control.cancel_consumer('foo') - self.assertIn('cancel_consumer', MockMailbox.sent) - - 
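# each remaining control method should simply enqueue its command name
# on the mocked mailbox: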
@with_mock_broadcast - def test_enable_events(self): - self.control.enable_events() - self.assertIn('enable_events', MockMailbox.sent) - - @with_mock_broadcast - def test_disable_events(self): - self.control.disable_events() - self.assertIn('disable_events', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke(self): - self.control.revoke('foozbaaz') - self.assertIn('revoke', MockMailbox.sent) - - @with_mock_broadcast - def test_ping(self): - self.control.ping() - self.assertIn('ping', MockMailbox.sent) - - @with_mock_broadcast - def test_election(self): - self.control.election('some_id', 'topic', 'action') - self.assertIn('election', MockMailbox.sent) - - @with_mock_broadcast - def test_pool_grow(self): - self.control.pool_grow(2) - self.assertIn('pool_grow', MockMailbox.sent) - - @with_mock_broadcast - def test_pool_shrink(self): - self.control.pool_shrink(2) - self.assertIn('pool_shrink', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke_from_result(self): - self.app.AsyncResult('foozbazzbar').revoke() - self.assertIn('revoke', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke_from_resultset(self): - r = self.app.GroupResult(uuid(), - [self.app.AsyncResult(x) - for x in [uuid() for i in range(10)]]) - r.revoke() - self.assertIn('revoke', MockMailbox.sent) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py deleted file mode 100644 index bf87f80..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import absolute_import - -import sys - -from importlib import import_module - -from celery.app.defaults import NAMESPACES - -from celery.tests.case import ( - AppCase, Mock, patch, pypy_version, sys_platform, -) - - -class test_defaults(AppCase): - - def setup(self): - self._prev = sys.modules.pop('celery.app.defaults', None) - - def teardown(self): - if self._prev: - sys.modules['celery.app.defaults'] = self._prev - - def test_option_repr(self): - self.assertTrue(repr(NAMESPACES['BROKER']['URL'])) - - def test_any(self): - val = object() - self.assertIs(self.defaults.Option.typemap['any'](val), val) - - def test_default_pool_pypy_14(self): - with sys_platform('darwin'): - with pypy_version((1, 4, 0)): - self.assertEqual(self.defaults.DEFAULT_POOL, 'solo') - - def test_default_pool_pypy_15(self): - with sys_platform('darwin'): - with pypy_version((1, 5, 0)): - self.assertEqual(self.defaults.DEFAULT_POOL, 'prefork') - - def test_deprecated(self): - source = Mock() - source.CELERYD_LOG_LEVEL = 2 - with patch('celery.utils.warn_deprecated') as warn: - self.defaults.find_deprecated_settings(source) - self.assertTrue(warn.called) - - def test_default_pool_jython(self): - with sys_platform('java 1.6.51'): - self.assertEqual(self.defaults.DEFAULT_POOL, 'threads') - - def test_find(self): - find = self.defaults.find - - self.assertEqual(find('server_email')[2].default, 'celery@localhost') - self.assertEqual(find('default_queue')[2].default, 'celery') - self.assertEqual(find('celery_default_exchange')[2], 'celery') - - @property - def defaults(self): - return import_module('celery.app.defaults') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py deleted file mode 100644 index 25d2b4e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py +++ 
/dev/null @@ -1,35 +0,0 @@ -from __future__ import absolute_import - -import pickle - -from datetime import datetime - -from celery.exceptions import Reject, Retry - -from celery.tests.case import AppCase - - -class test_Retry(AppCase): - - def test_when_datetime(self): - x = Retry('foo', KeyError(), when=datetime.utcnow()) - self.assertTrue(x.humanize()) - - def test_pickleable(self): - x = Retry('foo', KeyError(), when=datetime.utcnow()) - self.assertTrue(pickle.loads(pickle.dumps(x))) - - -class test_Reject(AppCase): - - def test_attrs(self): - x = Reject('foo', requeue=True) - self.assertEqual(x.reason, 'foo') - self.assertTrue(x.requeue) - - def test_repr(self): - self.assertTrue(repr(Reject('foo', True))) - - def test_pickleable(self): - x = Retry('foo', True) - self.assertTrue(pickle.loads(pickle.dumps(x))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py deleted file mode 100644 index cc9fb55..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py +++ /dev/null @@ -1,275 +0,0 @@ -from __future__ import absolute_import - -import os -import sys -import warnings - -from celery import loaders -from celery.exceptions import ( - NotConfigured, -) -from celery.loaders import base -from celery.loaders import default -from celery.loaders.app import AppLoader -from celery.utils.imports import NotAPackage -from celery.utils.mail import SendmailWarning - -from celery.tests.case import ( - AppCase, Case, Mock, depends_on_current_app, patch, with_environ, -) - - -class DummyLoader(base.BaseLoader): - - def read_configuration(self): - return {'foo': 'bar', 'CELERY_IMPORTS': ('os', 'sys')} - - -class test_loaders(AppCase): - - def test_get_loader_cls(self): - self.assertEqual(loaders.get_loader_cls('default'), - default.Loader) - - @depends_on_current_app - def test_current_loader(self): - with self.assertPendingDeprecation(): - self.assertIs(loaders.current_loader(), self.app.loader) - - @depends_on_current_app - def test_load_settings(self): - with self.assertPendingDeprecation(): - self.assertIs(loaders.load_settings(), self.app.conf) - - -class test_LoaderBase(AppCase): - message_options = {'subject': 'Subject', - 'body': 'Body', - 'sender': 'x@x.com', - 'to': 'y@x.com'} - server_options = {'host': 'smtp.x.com', - 'port': 1234, - 'user': 'x', - 'password': 'qwerty', - 'timeout': 3} - - def setup(self): - self.loader = DummyLoader(app=self.app) - - def test_handlers_pass(self): - self.loader.on_task_init('foo.task', 'feedface-cafebabe') - self.loader.on_worker_init() - - def test_now(self): - self.assertTrue(self.loader.now(utc=True)) - self.assertTrue(self.loader.now(utc=False)) - - def test_read_configuration_no_env(self): - self.assertDictEqual( - base.BaseLoader(app=self.app).read_configuration( - 'FOO_X_S_WE_WQ_Q_WE'), - {}, - ) - - def test_autodiscovery(self): - with patch('celery.loaders.base.autodiscover_tasks') as auto: - auto.return_value = [Mock()] - auto.return_value[0].__name__ = 'moo' - self.loader.autodiscover_tasks(['A', 'B']) - self.assertIn('moo', self.loader.task_modules) - self.loader.task_modules.discard('moo') - - def test_import_task_module(self): - self.assertEqual(sys, self.loader.import_task_module('sys')) - - def test_init_worker_process(self): - self.loader.on_worker_process_init() - m = self.loader.on_worker_process_init = Mock() - self.loader.init_worker_process() - m.assert_called_with() - - def 
test_config_from_object_module(self): - self.loader.import_from_cwd = Mock() - self.loader.config_from_object('module_name') - self.loader.import_from_cwd.assert_called_with('module_name') - - def test_conf_property(self): - self.assertEqual(self.loader.conf['foo'], 'bar') - self.assertEqual(self.loader._conf['foo'], 'bar') - self.assertEqual(self.loader.conf['foo'], 'bar') - - def test_import_default_modules(self): - def modnames(l): - return [m.__name__ for m in l] - self.app.conf.CELERY_IMPORTS = ('os', 'sys') - self.assertEqual( - sorted(modnames(self.loader.import_default_modules())), - sorted(modnames([os, sys])), - ) - - def test_import_from_cwd_custom_imp(self): - - def imp(module, package=None): - imp.called = True - imp.called = False - - self.loader.import_from_cwd('foo', imp=imp) - self.assertTrue(imp.called) - - @patch('celery.utils.mail.Mailer._send') - def test_mail_admins_errors(self, send): - send.side_effect = KeyError() - opts = dict(self.message_options, **self.server_options) - - with self.assertWarnsRegex(SendmailWarning, r'KeyError'): - self.loader.mail_admins(fail_silently=True, **opts) - - with self.assertRaises(KeyError): - self.loader.mail_admins(fail_silently=False, **opts) - - @patch('celery.utils.mail.Mailer._send') - def test_mail_admins(self, send): - opts = dict(self.message_options, **self.server_options) - self.loader.mail_admins(**opts) - self.assertTrue(send.call_args) - message = send.call_args[0][0] - self.assertEqual(message.to, [self.message_options['to']]) - self.assertEqual(message.subject, self.message_options['subject']) - self.assertEqual(message.sender, self.message_options['sender']) - self.assertEqual(message.body, self.message_options['body']) - - def test_mail_attribute(self): - from celery.utils import mail - loader = base.BaseLoader(app=self.app) - self.assertIs(loader.mail, mail) - - def test_cmdline_config_ValueError(self): - with self.assertRaises(ValueError): - self.loader.cmdline_config_parser(['broker.port=foobar']) - - -class test_DefaultLoader(AppCase): - - @patch('celery.loaders.base.find_module') - def test_read_configuration_not_a_package(self, find_module): - find_module.side_effect = NotAPackage() - l = default.Loader(app=self.app) - with self.assertRaises(NotAPackage): - l.read_configuration(fail_silently=False) - - @patch('celery.loaders.base.find_module') - @with_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py') - def test_read_configuration_py_in_name(self, find_module): - find_module.side_effect = NotAPackage() - l = default.Loader(app=self.app) - with self.assertRaises(NotAPackage): - l.read_configuration(fail_silently=False) - - @patch('celery.loaders.base.find_module') - def test_read_configuration_importerror(self, find_module): - default.C_WNOCONF = True - find_module.side_effect = ImportError() - l = default.Loader(app=self.app) - with self.assertWarnsRegex(NotConfigured, r'make sure it exists'): - l.read_configuration(fail_silently=True) - default.C_WNOCONF = False - l.read_configuration(fail_silently=True) - - def test_read_configuration(self): - from types import ModuleType - - class ConfigModule(ModuleType): - pass - - configname = os.environ.get('CELERY_CONFIG_MODULE') or 'celeryconfig' - celeryconfig = ConfigModule(configname) - celeryconfig.CELERY_IMPORTS = ('os', 'sys') - - prevconfig = sys.modules.get(configname) - sys.modules[configname] = celeryconfig - try: - l = default.Loader(app=self.app) - l.find_module = Mock(name='find_module') - settings = l.read_configuration(fail_silently=False) - 
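# the celeryconfig module injected into sys.modules above should surface here: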
self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) - settings = l.read_configuration(fail_silently=False) - self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) - l.on_worker_init() - finally: - if prevconfig: - sys.modules[configname] = prevconfig - - def test_import_from_cwd(self): - l = default.Loader(app=self.app) - old_path = list(sys.path) - try: - sys.path.remove(os.getcwd()) - except ValueError: - pass - celery = sys.modules.pop('celery', None) - sys.modules.pop('celery.five', None) - try: - self.assertTrue(l.import_from_cwd('celery')) - sys.modules.pop('celery', None) - sys.modules.pop('celery.five', None) - sys.path.insert(0, os.getcwd()) - self.assertTrue(l.import_from_cwd('celery')) - finally: - sys.path = old_path - sys.modules['celery'] = celery - - def test_unconfigured_settings(self): - context_executed = [False] - - class _Loader(default.Loader): - - def find_module(self, name): - raise ImportError(name) - - with warnings.catch_warnings(record=True): - l = _Loader(app=self.app) - self.assertFalse(l.configured) - context_executed[0] = True - self.assertTrue(context_executed[0]) - - -class test_AppLoader(AppCase): - - def setup(self): - self.loader = AppLoader(app=self.app) - - def test_on_worker_init(self): - self.app.conf.CELERY_IMPORTS = ('subprocess', ) - sys.modules.pop('subprocess', None) - self.loader.init_worker() - self.assertIn('subprocess', sys.modules) - - -class test_autodiscovery(Case): - - def test_autodiscover_tasks(self): - base._RACE_PROTECTION = True - try: - base.autodiscover_tasks(['foo']) - finally: - base._RACE_PROTECTION = False - with patch('celery.loaders.base.find_related_module') as frm: - base.autodiscover_tasks(['foo']) - self.assertTrue(frm.called) - - def test_find_related_module(self): - with patch('importlib.import_module') as imp: - with patch('imp.find_module') as find: - imp.return_value = Mock() - imp.return_value.__path__ = 'foo' - base.find_related_module(base, 'tasks') - - def se1(val): - imp.side_effect = AttributeError() - - imp.side_effect = se1 - base.find_related_module(base, 'tasks') - imp.side_effect = None - - find.side_effect = ImportError() - base.find_related_module(base, 'tasks') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py deleted file mode 100644 index 588e39b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py +++ /dev/null @@ -1,385 +0,0 @@ -from __future__ import absolute_import - -import sys -import logging - -from collections import defaultdict -from io import StringIO -from tempfile import mktemp - -from celery import signals -from celery.app.log import TaskFormatter -from celery.utils.log import LoggingProxy -from celery.utils import uuid -from celery.utils.log import ( - get_logger, - ColorFormatter, - logger as base_logger, - get_task_logger, - task_logger, - in_sighandler, - logger_isa, - ensure_process_aware_logger, -) -from celery.tests.case import ( - AppCase, Mock, SkipTest, - get_handlers, override_stdouts, patch, wrap_logger, restore_logging, -) - - -class test_TaskFormatter(AppCase): - - def test_no_task(self): - class Record(object): - msg = 'hello world' - levelname = 'info' - exc_text = exc_info = None - stack_info = None - - def getMessage(self): - return self.msg - record = Record() - x = TaskFormatter() - x.format(record) - self.assertEqual(record.task_name, '???') - self.assertEqual(record.task_id, '???') - - -class test_logger_isa(AppCase): - - 
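# logger_isa(child, parent) walks the logger .parent chain (cycles raise RuntimeError):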
def test_isa(self): - x = get_task_logger('Z1george') - self.assertTrue(logger_isa(x, task_logger)) - prev_x, x.parent = x.parent, None - try: - self.assertFalse(logger_isa(x, task_logger)) - finally: - x.parent = prev_x - - y = get_task_logger('Z1elaine') - y.parent = x - self.assertTrue(logger_isa(y, task_logger)) - self.assertTrue(logger_isa(y, x)) - self.assertTrue(logger_isa(y, y)) - - z = get_task_logger('Z1jerry') - z.parent = y - self.assertTrue(logger_isa(z, task_logger)) - self.assertTrue(logger_isa(z, y)) - self.assertTrue(logger_isa(z, x)) - self.assertTrue(logger_isa(z, z)) - - def test_recursive(self): - x = get_task_logger('X1foo') - prev, x.parent = x.parent, x - try: - with self.assertRaises(RuntimeError): - logger_isa(x, task_logger) - finally: - x.parent = prev - - y = get_task_logger('X2foo') - z = get_task_logger('X2foo') - prev_y, y.parent = y.parent, z - try: - prev_z, z.parent = z.parent, y - try: - with self.assertRaises(RuntimeError): - logger_isa(y, task_logger) - finally: - z.parent = prev_z - finally: - y.parent = prev_y - - -class test_ColorFormatter(AppCase): - - @patch('celery.utils.log.safe_str') - @patch('logging.Formatter.formatException') - def test_formatException_not_string(self, fe, safe_str): - x = ColorFormatter() - value = KeyError() - fe.return_value = value - self.assertIs(x.formatException(value), value) - self.assertTrue(fe.called) - self.assertFalse(safe_str.called) - - @patch('logging.Formatter.formatException') - @patch('celery.utils.log.safe_str') - def test_formatException_string(self, safe_str, fe): - x = ColorFormatter() - fe.return_value = 'HELLO' - try: - raise Exception() - except Exception: - self.assertTrue(x.formatException(sys.exc_info())) - if sys.version_info[0] == 2: - self.assertTrue(safe_str.called) - - @patch('logging.Formatter.format') - def test_format_object(self, _format): - x = ColorFormatter() - x.use_color = True - record = Mock() - record.levelname = 'ERROR' - record.msg = object() - self.assertTrue(x.format(record)) - - @patch('celery.utils.log.safe_str') - def test_format_raises(self, safe_str): - x = ColorFormatter() - - def on_safe_str(s): - try: - raise ValueError('foo') - finally: - safe_str.side_effect = None - safe_str.side_effect = on_safe_str - - class Record(object): - levelname = 'ERROR' - msg = 'HELLO' - exc_info = 1 - exc_text = 'error text' - stack_info = None - - def __str__(self): - return on_safe_str('') - - def getMessage(self): - return self.msg - - record = Record() - safe_str.return_value = record - - msg = x.format(record) - self.assertIn('= 3: - raise - else: - break - - def assertRelativedelta(self, due, last_ran): - try: - from dateutil.relativedelta import relativedelta - except ImportError: - return - l1, d1, n1 = due.remaining_delta(last_ran) - l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) - if not isinstance(d1, relativedelta): - self.assertEqual(l1, l2) - for field, value in items(d1._fields()): - self.assertEqual(getattr(d1, field), value) - self.assertFalse(d2.years) - self.assertFalse(d2.months) - self.assertFalse(d2.days) - self.assertFalse(d2.leapdays) - self.assertFalse(d2.hours) - self.assertFalse(d2.minutes) - self.assertFalse(d2.seconds) - self.assertFalse(d2.microseconds) - - def test_every_minute_execution_is_due(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertRelativedelta(self.every_minute, last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - 
def test_every_minute_execution_is_not_due(self): - last_ran = self.now - timedelta(seconds=self.now.second) - due, remaining = self.every_minute.is_due(last_ran) - self.assertFalse(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_saturday(self): - # 29th of May 2010 is a saturday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_sunday(self): - # 30th of May 2010 is a sunday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_monday(self): - # 31st of May 2010 is a monday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_every_hour_execution_is_due(self): - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 60 * 60) - - def test_every_hour_execution_is_not_due(self): - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): - due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_first_quarter_execution_is_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 15)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 6, 30), - ) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - def test_second_quarter_execution_is_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 6, 30), - ) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - def test_first_quarter_execution_is_not_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 14)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 10, 0), - ) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_second_quarter_execution_is_not_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 29)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 10, 15), - ) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_daily_execution_is_due(self): - with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): - due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 24 * 60 * 60) - - def test_daily_execution_is_not_due(self): - with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 21 * 60 * 60) - - def test_weekly_execution_is_due(self): - with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): - due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) - self.assertTrue(due) - 
self.assertEqual(remaining, 7 * 24 * 60 * 60) - - def test_weekly_execution_is_not_due(self): - with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): - due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60) - - def test_monthly_execution_is_due(self): - with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): - due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 28 * 24 * 60 * 60) - - def test_monthly_execution_is_not_due(self): - with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): - due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) - - def test_monthly_moy_execution_is_due(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 22, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 7, 4, 10, 0), - ) - self.assertTrue(due) - self.assertEqual(remaining, 60.) - - def test_monthly_moy_execution_is_not_due(self): - raise SkipTest('unstable test') - with patch_crontab_nowfun( - self.monthly_moy, datetime(2013, 6, 28, 14, 30)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 6, 28, 22, 14), - ) - self.assertFalse(due) - attempt = ( - time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - - time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - - 60 * 60 - ) - self.assertEqual(remaining, attempt) - - def test_monthly_moy_execution_is_due2(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 22, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 2, 28, 10, 0), - ) - self.assertTrue(due) - self.assertEqual(remaining, 60.) 
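These is_due assertions are deterministic only because patch_crontab_nowfun() pins the schedule's clock. Outside the test helper, crontab accepts a nowfun callable for the same purpose; a hedged sketch assuming nowfun behaves like the patched helper, reproducing the values asserted in test_every_hour_execution_is_due above:

    from datetime import datetime
    from celery.schedules import crontab

    # Freeze "now" at 10:30 so the outcome is reproducible.
    hourly = crontab(minute=30, nowfun=lambda: datetime(2010, 5, 10, 10, 30))

    due, remaining = hourly.is_due(datetime(2010, 5, 10, 6, 30))
    assert due                   # last run was four hours ago
    assert remaining == 60 * 60  # re-check at the next *:30, one hour away
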
- - def test_monthly_moy_execution_is_not_due2(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 21, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 6, 28, 22, 14), - ) - self.assertFalse(due) - attempt = 60 * 60 - self.assertEqual(remaining, attempt) - - def test_yearly_execution_is_due(self): - with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): - due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 364 * 24 * 60 * 60) - - def test_yearly_execution_is_not_due(self): - with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): - due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py deleted file mode 100644 index b0ff108..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import absolute_import - -from collections import Mapping, MutableMapping - -from celery.app.utils import Settings, filter_hidden_settings, bugreport - -from celery.tests.case import AppCase, Mock - - -class TestSettings(AppCase): - """ - Tests of celery.app.utils.Settings - """ - def test_is_mapping(self): - """Settings should be a collections.Mapping""" - self.assertTrue(issubclass(Settings, Mapping)) - - def test_is_mutable_mapping(self): - """Settings should be a collections.MutableMapping""" - self.assertTrue(issubclass(Settings, MutableMapping)) - - -class test_filter_hidden_settings(AppCase): - - def test_handles_non_string_keys(self): - """filter_hidden_settings shouldn't raise an exception when handling - mappings with non-string keys""" - conf = { - 'STRING_KEY': 'VALUE1', - ('NON', 'STRING', 'KEY'): 'VALUE2', - 'STRING_KEY2': { - 'STRING_KEY3': 1, - ('NON', 'STRING', 'KEY', '2'): 2 - }, - } - filter_hidden_settings(conf) - - -class test_bugreport(AppCase): - - def test_no_conn_driver_info(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock() - conn.transport = None - - bugreport(self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py deleted file mode 100644 index 282f8b1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py +++ /dev/null @@ -1,406 +0,0 @@ -from __future__ import absolute_import - -import json -import pickle -import socket - -from contextlib import contextmanager -from datetime import timedelta -from pickle import dumps, loads - -from billiard.einfo import ExceptionInfo - -from celery import states -from celery.backends.amqp import AMQPBackend -from celery.exceptions import TimeoutError -from celery.five import Empty, Queue, range -from celery.utils import uuid - -from celery.tests.case import ( - AppCase, Mock, depends_on_current_app, patch, sleepdeprived, -) - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class test_AMQPBackend(AppCase): - - def create_backend(self, **opts): - opts = 
dict(dict(serializer='pickle', persistent=True), **opts) - return AMQPBackend(self.app, **opts) - - def test_mark_as_done(self): - tb1 = self.create_backend(max_cached_results=1) - tb2 = self.create_backend(max_cached_results=1) - - tid = uuid() - - tb1.mark_as_done(tid, 42) - self.assertEqual(tb2.get_status(tid), states.SUCCESS) - self.assertEqual(tb2.get_result(tid), 42) - self.assertTrue(tb2._cache.get(tid)) - self.assertTrue(tb2.get_result(tid), 42) - - @depends_on_current_app - def test_pickleable(self): - self.assertTrue(loads(dumps(self.create_backend()))) - - def test_revive(self): - tb = self.create_backend() - tb.revive(None) - - def test_is_pickled(self): - tb1 = self.create_backend() - tb2 = self.create_backend() - - tid2 = uuid() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - tb1.mark_as_done(tid2, result) - # is serialized properly. - rindb = tb2.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_failure(self): - tb1 = self.create_backend() - tb2 = self.create_backend() - - tid3 = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - einfo = ExceptionInfo() - tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback) - self.assertEqual(tb2.get_status(tid3), states.FAILURE) - self.assertIsInstance(tb2.get_result(tid3), KeyError) - self.assertEqual(tb2.get_traceback(tid3), einfo.traceback) - - def test_repair_uuid(self): - from celery.backends.amqp import repair_uuid - for i in range(10): - tid = uuid() - self.assertEqual(repair_uuid(tid.replace('-', '')), tid) - - def test_expires_is_int(self): - b = self.create_backend(expires=48) - self.assertEqual(b.queue_arguments.get('x-expires'), 48 * 1000.0) - - def test_expires_is_float(self): - b = self.create_backend(expires=48.3) - self.assertEqual(b.queue_arguments.get('x-expires'), 48.3 * 1000.0) - - def test_expires_is_timedelta(self): - b = self.create_backend(expires=timedelta(minutes=1)) - self.assertEqual(b.queue_arguments.get('x-expires'), 60 * 1000.0) - - @sleepdeprived() - def test_store_result_retries(self): - iterations = [0] - stop_raising_at = [5] - - def publish(*args, **kwargs): - if iterations[0] > stop_raising_at[0]: - return - iterations[0] += 1 - raise KeyError('foo') - - backend = AMQPBackend(self.app) - from celery.app.amqp import TaskProducer - prod, TaskProducer.publish = TaskProducer.publish, publish - try: - with self.assertRaises(KeyError): - backend.retry_policy['max_retries'] = None - backend.store_result('foo', 'bar', 'STARTED') - - with self.assertRaises(KeyError): - backend.retry_policy['max_retries'] = 10 - backend.store_result('foo', 'bar', 'STARTED') - finally: - TaskProducer.publish = prod - - def assertState(self, retval, state): - self.assertEqual(retval['status'], state) - - def test_poll_no_messages(self): - b = self.create_backend() - self.assertState(b.get_task_meta(uuid()), states.PENDING) - - @contextmanager - def _result_context(self, serializer='pickle'): - results = Queue() - - class Message(object): - acked = 0 - requeued = 0 - - def __init__(self, **merge): - self.payload = dict({'status': states.STARTED, - 'result': None}, **merge) - if serializer == 'json': - self.body = json.dumps(self.payload) - self.content_type = 'application/json' - else: - self.body = pickle.dumps(self.payload) - self.content_type = 'application/x-python-serialize' - self.content_encoding = 'binary' - - def ack(self, *args, **kwargs): - self.acked += 1 - - def requeue(self, *args, **kwargs): - 
self.requeued += 1 - - class MockBinding(object): - - def __init__(self, *args, **kwargs): - self.channel = Mock() - - def __call__(self, *args, **kwargs): - return self - - def declare(self): - pass - - def get(self, no_ack=False, accept=None): - try: - m = results.get(block=False) - if m: - m.accept = accept - return m - except Empty: - pass - - def is_bound(self): - return True - - class MockBackend(AMQPBackend): - Queue = MockBinding - - backend = MockBackend(self.app, max_cached_results=100) - backend.serializer = serializer - backend._republish = Mock() - - yield results, backend, Message - - def test_backlog_limit_exceeded(self): - with self._result_context() as (results, backend, Message): - for i in range(1001): - results.put(Message(task_id='id', status=states.RECEIVED)) - with self.assertRaises(backend.BacklogLimitExceeded): - backend.get_task_meta('id') - - def test_poll_result(self): - with self._result_context() as (results, backend, Message): - tid = uuid() - # FFWD's to the latest state. - state_messages = [ - Message(task_id=tid, status=states.RECEIVED, seq=1), - Message(task_id=tid, status=states.STARTED, seq=2), - Message(task_id=tid, status=states.FAILURE, seq=3), - ] - for state_message in state_messages: - results.put(state_message) - r1 = backend.get_task_meta(tid) - self.assertDictContainsSubset( - { - 'status': states.FAILURE, - 'seq': 3 - }, r1, 'FFWDs to the last state', - ) - - # Caches last known state. - tid = uuid() - results.put(Message(task_id=tid)) - backend.get_task_meta(tid) - self.assertIn(tid, backend._cache, 'Caches last known state') - - self.assertTrue(state_messages[-1].requeued) - - # Returns cache if no new states. - results.queue.clear() - assert not results.qsize() - backend._cache[tid] = 'hello' - self.assertEqual( - backend.get_task_meta(tid), 'hello', - 'Returns cache if no new states', - ) - - def test_poll_result_for_json_serializer(self): - with self._result_context(serializer='json') as ( - results, backend, Message): - tid = uuid() - # FFWD's to the latest state. - state_messages = [ - Message(task_id=tid, status=states.RECEIVED, seq=1), - Message(task_id=tid, status=states.STARTED, seq=2), - Message(task_id=tid, status=states.FAILURE, seq=3, - result={ - 'exc_type': 'RuntimeError', - 'exc_message': 'Mock' - }), - ] - for state_message in state_messages: - results.put(state_message) - r1 = backend.get_task_meta(tid) - self.assertDictContainsSubset({ - 'status': states.FAILURE, - 'seq': 3 - }, r1, 'FFWDs to the last state') - self.assertEquals(type(r1['result']).__name__, 'RuntimeError') - self.assertEqual(str(r1['result']), 'Mock') - - # Caches last known state. - tid = uuid() - results.put(Message(task_id=tid)) - backend.get_task_meta(tid) - self.assertIn(tid, backend._cache, 'Caches last known state') - - self.assertTrue(state_messages[-1].requeued) - - # Returns cache if no new states. 
- results.queue.clear() - assert not results.qsize() - backend._cache[tid] = 'hello' - self.assertEqual( - backend.get_task_meta(tid), 'hello', - 'Returns cache if no new states', - ) - - def test_wait_for(self): - b = self.create_backend() - - tid = uuid() - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, None, states.STARTED) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, None, states.RETRY) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, 42, states.SUCCESS) - self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42) - b.store_result(tid, 56, states.SUCCESS) - self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42, - 'result is cached') - self.assertEqual(b.wait_for(tid, timeout=1, cache=False)['result'], 56) - b.store_result(tid, KeyError('foo'), states.FAILURE) - res = b.wait_for(tid, timeout=1, cache=False) - self.assertEqual(res['status'], states.FAILURE) - b.store_result(tid, KeyError('foo'), states.PENDING) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.01, cache=False) - - def test_drain_events_remaining_timeouts(self): - - class Connection(object): - - def drain_events(self, timeout=None): - pass - - b = self.create_backend() - with self.app.pool.acquire_channel(block=False) as (_, channel): - binding = b._create_binding(uuid()) - consumer = b.Consumer(channel, binding, no_ack=True) - with self.assertRaises(socket.timeout): - b.drain_events(Connection(), consumer, timeout=0.1) - - def test_get_many(self): - b = self.create_backend(max_cached_results=10) - - tids = [] - for i in range(10): - tid = uuid() - b.store_result(tid, i, states.SUCCESS) - tids.append(tid) - - res = list(b.get_many(tids, timeout=1)) - expected_results = [ - (task_id, { - 'status': states.SUCCESS, - 'result': i, - 'traceback': None, - 'task_id': task_id, - 'children': None, - }) - for i, task_id in enumerate(tids) - ] - self.assertEqual(sorted(res), sorted(expected_results)) - self.assertDictEqual(b._cache[res[0][0]], res[0][1]) - cached_res = list(b.get_many(tids, timeout=1)) - self.assertEqual(sorted(cached_res), sorted(expected_results)) - - # times out when not ready in cache (this shouldn't happen) - b._cache[res[0][0]]['status'] = states.RETRY - with self.assertRaises(socket.timeout): - list(b.get_many(tids, timeout=0.01)) - - # times out when result not yet ready - with self.assertRaises(socket.timeout): - tids = [uuid()] - b.store_result(tids[0], i, states.PENDING) - list(b.get_many(tids, timeout=0.01)) - - def test_get_many_raises_outer_block(self): - - class Backend(AMQPBackend): - - def Consumer(*args, **kwargs): - raise KeyError('foo') - - b = Backend(self.app) - with self.assertRaises(KeyError): - next(b.get_many(['id1'])) - - def test_get_many_raises_inner_block(self): - with patch('kombu.connection.Connection.drain_events') as drain: - drain.side_effect = KeyError('foo') - b = AMQPBackend(self.app) - with self.assertRaises(KeyError): - next(b.get_many(['id1'])) - - def test_consume_raises_inner_block(self): - with patch('kombu.connection.Connection.drain_events') as drain: - - def se(*args, **kwargs): - drain.side_effect = ValueError() - raise KeyError('foo') - drain.side_effect = se - b = AMQPBackend(self.app) - with self.assertRaises(ValueError): - next(b.consume('id1')) - - def test_no_expires(self): - b = self.create_backend(expires=None) - app = self.app - app.conf.CELERY_TASK_RESULT_EXPIRES = None - b = 
self.create_backend(expires=None) - with self.assertRaises(KeyError): - b.queue_arguments['x-expires'] - - def test_process_cleanup(self): - self.create_backend().process_cleanup() - - def test_reload_task_result(self): - with self.assertRaises(NotImplementedError): - self.create_backend().reload_task_result('x') - - def test_reload_group_result(self): - with self.assertRaises(NotImplementedError): - self.create_backend().reload_group_result('x') - - def test_save_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().save_group('x', 'x') - - def test_restore_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().restore_group('x') - - def test_delete_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().delete_group('x') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py deleted file mode 100644 index d301e55..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import absolute_import - -from celery import backends -from celery.exceptions import ImproperlyConfigured -from celery.backends.amqp import AMQPBackend -from celery.backends.cache import CacheBackend -from celery.tests.case import AppCase, depends_on_current_app, patch - - -class test_backends(AppCase): - - def test_get_backend_aliases(self): - expects = [('amqp://', AMQPBackend), - ('cache+memory://', CacheBackend)] - - for url, expect_cls in expects: - backend, url = backends.get_backend_by_url(url, self.app.loader) - self.assertIsInstance( - backend(app=self.app, url=url), - expect_cls, - ) - - def test_unknown_backend(self): - with self.assertRaises(ImportError): - backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader) - - @depends_on_current_app - def test_default_backend(self): - self.assertEqual(backends.default_backend, self.app.backend) - - def test_backend_by_url(self, url='redis://localhost/1'): - from celery.backends.redis import RedisBackend - backend, url_ = backends.get_backend_by_url(url, self.app.loader) - self.assertIs(backend, RedisBackend) - self.assertEqual(url_, url) - - def test_sym_raises_ValuError(self): - with patch('celery.backends.symbol_by_name') as sbn: - sbn.side_effect = ValueError() - with self.assertRaises(ImproperlyConfigured): - backends.get_backend_cls('xxx.xxx:foo', self.app.loader) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py deleted file mode 100644 index f54dc07..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py +++ /dev/null @@ -1,466 +0,0 @@ -from __future__ import absolute_import - -import sys -import types - -from contextlib import contextmanager - -from celery.exceptions import ChordError -from celery.five import items, range -from celery.utils import serialization -from celery.utils.serialization import subclass_exception -from celery.utils.serialization import find_pickleable_exception as fnpe -from celery.utils.serialization import UnpickleableExceptionWrapper -from celery.utils.serialization import get_pickleable_exception as gpe - -from celery import states -from celery import group -from celery.backends.base import ( - BaseBackend, - KeyValueStoreBackend, - DisabledBackend, -) -from celery.result import result_from_tuple -from 
celery.utils import uuid - -from celery.tests.case import AppCase, Mock, SkipTest, patch - - -class wrapobject(object): - - def __init__(self, *args, **kwargs): - self.args = args - -if sys.version_info[0] == 3 or getattr(sys, 'pypy_version_info', None): - Oldstyle = None -else: - Oldstyle = types.ClassType('Oldstyle', (), {}) -Unpickleable = subclass_exception('Unpickleable', KeyError, 'foo.module') -Impossible = subclass_exception('Impossible', object, 'foo.module') -Lookalike = subclass_exception('Lookalike', wrapobject, 'foo.module') - - -class test_serialization(AppCase): - - def test_create_exception_cls(self): - self.assertTrue(serialization.create_exception_cls('FooError', 'm')) - self.assertTrue(serialization.create_exception_cls('FooError', 'm', - KeyError)) - - -class test_BaseBackend_interface(AppCase): - - def setup(self): - self.b = BaseBackend(self.app) - - def test__forget(self): - with self.assertRaises(NotImplementedError): - self.b._forget('SOMExx-N0Nex1stant-IDxx-') - - def test_forget(self): - with self.assertRaises(NotImplementedError): - self.b.forget('SOMExx-N0nex1stant-IDxx-') - - def test_on_chord_part_return(self): - self.b.on_chord_part_return(None, None, None) - - def test_apply_chord(self, unlock='celery.chord_unlock'): - self.app.tasks[unlock] = Mock() - self.b.apply_chord( - group(app=self.app), (), 'dakj221', None, - result=[self.app.AsyncResult(x) for x in [1, 2, 3]], - ) - self.assertTrue(self.app.tasks[unlock].apply_async.call_count) - - -class test_exception_pickle(AppCase): - - def test_oldstyle(self): - if Oldstyle is None: - raise SkipTest('py3k does not support old style classes') - self.assertTrue(fnpe(Oldstyle())) - - def test_BaseException(self): - self.assertIsNone(fnpe(Exception())) - - def test_get_pickleable_exception(self): - exc = Exception('foo') - self.assertEqual(gpe(exc), exc) - - def test_unpickleable(self): - self.assertIsInstance(fnpe(Unpickleable()), KeyError) - self.assertIsNone(fnpe(Impossible())) - - -class test_prepare_exception(AppCase): - - def setup(self): - self.b = BaseBackend(self.app) - - def test_unpickleable(self): - x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) - self.assertIsInstance(x, KeyError) - y = self.b.exception_to_python(x) - self.assertIsInstance(y, KeyError) - - def test_impossible(self): - x = self.b.prepare_exception(Impossible()) - self.assertIsInstance(x, UnpickleableExceptionWrapper) - self.assertTrue(str(x)) - y = self.b.exception_to_python(x) - self.assertEqual(y.__class__.__name__, 'Impossible') - if sys.version_info < (2, 5): - self.assertTrue(y.__class__.__module__) - else: - self.assertEqual(y.__class__.__module__, 'foo.module') - - def test_regular(self): - x = self.b.prepare_exception(KeyError('baz')) - self.assertIsInstance(x, KeyError) - y = self.b.exception_to_python(x) - self.assertIsInstance(y, KeyError) - - -class KVBackend(KeyValueStoreBackend): - mget_returns_dict = False - - def __init__(self, app, *args, **kwargs): - self.db = {} - super(KVBackend, self).__init__(app) - - def get(self, key): - return self.db.get(key) - - def set(self, key, value): - self.db[key] = value - - def mget(self, keys): - if self.mget_returns_dict: - return dict((key, self.get(key)) for key in keys) - else: - return [self.get(k) for k in keys] - - def delete(self, key): - self.db.pop(key, None) - - -class DictBackend(BaseBackend): - - def __init__(self, *args, **kwargs): - BaseBackend.__init__(self, *args, **kwargs) - self._data = {'can-delete': {'result': 'foo'}} - - def _restore_group(self, 
group_id): - if group_id == 'exists': - return {'result': 'group'} - - def _get_task_meta_for(self, task_id): - if task_id == 'task-exists': - return {'result': 'task'} - - def _delete_group(self, group_id): - self._data.pop(group_id, None) - - -class test_BaseBackend_dict(AppCase): - - def setup(self): - self.b = DictBackend(app=self.app) - - def test_delete_group(self): - self.b.delete_group('can-delete') - self.assertNotIn('can-delete', self.b._data) - - def test_prepare_exception_json(self): - x = DictBackend(self.app, serializer='json') - e = x.prepare_exception(KeyError('foo')) - self.assertIn('exc_type', e) - e = x.exception_to_python(e) - self.assertEqual(e.__class__.__name__, 'KeyError') - self.assertEqual(str(e), "'foo'") - - def test_save_group(self): - b = BaseBackend(self.app) - b._save_group = Mock() - b.save_group('foofoo', 'xxx') - b._save_group.assert_called_with('foofoo', 'xxx') - - def test_forget_interface(self): - b = BaseBackend(self.app) - with self.assertRaises(NotImplementedError): - b.forget('foo') - - def test_restore_group(self): - self.assertIsNone(self.b.restore_group('missing')) - self.assertIsNone(self.b.restore_group('missing')) - self.assertEqual(self.b.restore_group('exists'), 'group') - self.assertEqual(self.b.restore_group('exists'), 'group') - self.assertEqual(self.b.restore_group('exists', cache=False), 'group') - - def test_reload_group_result(self): - self.b._cache = {} - self.b.reload_group_result('exists') - self.b._cache['exists'] = {'result': 'group'} - - def test_reload_task_result(self): - self.b._cache = {} - self.b.reload_task_result('task-exists') - self.b._cache['task-exists'] = {'result': 'task'} - - def test_fail_from_current_stack(self): - self.b.mark_as_failure = Mock() - try: - raise KeyError('foo') - except KeyError as exc: - self.b.fail_from_current_stack('task_id') - self.assertTrue(self.b.mark_as_failure.called) - args = self.b.mark_as_failure.call_args[0] - self.assertEqual(args[0], 'task_id') - self.assertIs(args[1], exc) - self.assertTrue(args[2]) - - def test_prepare_value_serializes_group_result(self): - self.b.serializer = 'json' - g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) - v = self.b.prepare_value(g) - self.assertIsInstance(v, (list, tuple)) - self.assertEqual(result_from_tuple(v, app=self.app), g) - - v2 = self.b.prepare_value(g[0]) - self.assertIsInstance(v2, (list, tuple)) - self.assertEqual(result_from_tuple(v2, app=self.app), g[0]) - - self.b.serializer = 'pickle' - self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult) - - def test_is_cached(self): - b = BaseBackend(app=self.app, max_cached_results=1) - b._cache['foo'] = 1 - self.assertTrue(b.is_cached('foo')) - self.assertFalse(b.is_cached('false')) - - -class test_KeyValueStoreBackend(AppCase): - - def setup(self): - self.b = KVBackend(app=self.app) - - def test_on_chord_part_return(self): - assert not self.b.implements_incr - self.b.on_chord_part_return(None, None, None) - - def test_get_store_delete_result(self): - tid = uuid() - self.b.mark_as_done(tid, 'Hello world') - self.assertEqual(self.b.get_result(tid), 'Hello world') - self.assertEqual(self.b.get_status(tid), states.SUCCESS) - self.b.forget(tid) - self.assertEqual(self.b.get_status(tid), states.PENDING) - - def test_strip_prefix(self): - x = self.b.get_key_for_task('x1b34') - self.assertEqual(self.b._strip_prefix(x), 'x1b34') - self.assertEqual(self.b._strip_prefix('x1b34'), 'x1b34') - - def test_get_many(self): - for is_dict in True, False: - 
self.b.mget_returns_dict = is_dict - ids = dict((uuid(), i) for i in range(10)) - for id, i in items(ids): - self.b.mark_as_done(id, i) - it = self.b.get_many(list(ids)) - for i, (got_id, got_state) in enumerate(it): - self.assertEqual(got_state['result'], ids[got_id]) - self.assertEqual(i, 9) - self.assertTrue(list(self.b.get_many(list(ids)))) - - def test_get_many_times_out(self): - tasks = [uuid() for _ in range(4)] - self.b._cache[tasks[1]] = {'status': 'PENDING'} - with self.assertRaises(self.b.TimeoutError): - list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) - - def test_chord_part_return_no_gid(self): - self.b.implements_incr = True - task = Mock() - state = 'SUCCESS' - result = 10 - task.request.group = None - self.b.get_key_for_chord = Mock() - self.b.get_key_for_chord.side_effect = AssertionError( - 'should not get here', - ) - self.assertIsNone(self.b.on_chord_part_return(task, state, result)) - - @contextmanager - def _chord_part_context(self, b): - - @self.app.task(shared=False) - def callback(result): - pass - - b.implements_incr = True - b.client = Mock() - with patch('celery.backends.base.GroupResult') as GR: - deps = GR.restore.return_value = Mock(name='DEPS') - deps.__len__ = Mock() - deps.__len__.return_value = 10 - b.incr = Mock() - b.incr.return_value = 10 - b.expire = Mock() - task = Mock() - task.request.group = 'grid' - cb = task.request.chord = callback.s() - task.request.chord.freeze() - callback.backend = b - callback.backend.fail_from_current_stack = Mock() - yield task, deps, cb - - def test_chord_part_return_propagate_set(self): - with self._chord_part_context(self.b) as (task, deps, _): - self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True) - self.assertFalse(self.b.expire.called) - deps.delete.assert_called_with() - deps.join_native.assert_called_with(propagate=True, timeout=3.0) - - def test_chord_part_return_propagate_default(self): - with self._chord_part_context(self.b) as (task, deps, _): - self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None) - self.assertFalse(self.b.expire.called) - deps.delete.assert_called_with() - deps.join_native.assert_called_with( - propagate=self.b.app.conf.CELERY_CHORD_PROPAGATES, - timeout=3.0, - ) - - def test_chord_part_return_join_raises_internal(self): - with self._chord_part_context(self.b) as (task, deps, callback): - deps._failed_join_report = lambda: iter([]) - deps.join_native.side_effect = KeyError('foo') - self.b.on_chord_part_return(task, 'SUCCESS', 10) - self.assertTrue(self.b.fail_from_current_stack.called) - args = self.b.fail_from_current_stack.call_args - exc = args[1]['exc'] - self.assertIsInstance(exc, ChordError) - self.assertIn('foo', str(exc)) - - def test_chord_part_return_join_raises_task(self): - b = KVBackend(serializer='pickle', app=self.app) - with self._chord_part_context(b) as (task, deps, callback): - deps._failed_join_report = lambda: iter([ - self.app.AsyncResult('culprit'), - ]) - deps.join_native.side_effect = KeyError('foo') - b.on_chord_part_return(task, 'SUCCESS', 10) - self.assertTrue(b.fail_from_current_stack.called) - args = b.fail_from_current_stack.call_args - exc = args[1]['exc'] - self.assertIsInstance(exc, ChordError) - self.assertIn('Dependency culprit raised', str(exc)) - - def test_restore_group_from_json(self): - b = KVBackend(serializer='json', app=self.app) - g = self.app.GroupResult( - 'group_id', - [self.app.AsyncResult('a'), self.app.AsyncResult('b')], - ) - b._save_group(g.id, g) - g2 = b._restore_group(g.id)['result'] - 
self.assertEqual(g2, g) - - def test_restore_group_from_pickle(self): - b = KVBackend(serializer='pickle', app=self.app) - g = self.app.GroupResult( - 'group_id', - [self.app.AsyncResult('a'), self.app.AsyncResult('b')], - ) - b._save_group(g.id, g) - g2 = b._restore_group(g.id)['result'] - self.assertEqual(g2, g) - - def test_chord_apply_fallback(self): - self.b.implements_incr = False - self.b.fallback_chord_unlock = Mock() - self.b.apply_chord( - group(app=self.app), (), 'group_id', 'body', - result='result', foo=1, - ) - self.b.fallback_chord_unlock.assert_called_with( - 'group_id', 'body', result='result', foo=1, - ) - - def test_get_missing_meta(self): - self.assertIsNone(self.b.get_result('xxx-missing')) - self.assertEqual(self.b.get_status('xxx-missing'), states.PENDING) - - def test_save_restore_delete_group(self): - tid = uuid() - tsr = self.app.GroupResult( - tid, [self.app.AsyncResult(uuid()) for _ in range(10)], - ) - self.b.save_group(tid, tsr) - self.b.restore_group(tid) - self.assertEqual(self.b.restore_group(tid), tsr) - self.b.delete_group(tid) - self.assertIsNone(self.b.restore_group(tid)) - - def test_restore_missing_group(self): - self.assertIsNone(self.b.restore_group('xxx-nonexistant')) - - -class test_KeyValueStoreBackend_interface(AppCase): - - def test_get(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).get('a') - - def test_set(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).set('a', 1) - - def test_incr(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).incr('a') - - def test_cleanup(self): - self.assertFalse(KeyValueStoreBackend(self.app).cleanup()) - - def test_delete(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).delete('a') - - def test_mget(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).mget(['a']) - - def test_forget(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).forget('a') - - -class test_DisabledBackend(AppCase): - - def test_store_result(self): - DisabledBackend(self.app).store_result() - - def test_is_disabled(self): - with self.assertRaises(NotImplementedError): - DisabledBackend(self.app).get_status('foo') - - def test_as_uri(self): - self.assertEqual(DisabledBackend(self.app).as_uri(), 'disabled://') - - -class test_as_uri(AppCase): - - def setup(self): - self.b = BaseBackend( - app=self.app, - url='sch://uuuu:pwpw@hostname.dom' - ) - - def test_as_uri_include_password(self): - self.assertEqual(self.b.as_uri(True), self.b.url) - - def test_as_uri_exclude_password(self): - self.assertEqual(self.b.as_uri(), 'sch://uuuu:**@hostname.dom/') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py deleted file mode 100644 index fcd8dd5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py +++ /dev/null @@ -1,280 +0,0 @@ -from __future__ import absolute_import - -import sys -import types - -from contextlib import contextmanager - -from kombu.utils.encoding import str_to_bytes, ensure_bytes - -from celery import signature -from celery import states -from celery import group -from celery.backends.cache import CacheBackend, DummyClient, backends -from celery.exceptions import ImproperlyConfigured -from celery.five import items, string, text_t -from celery.utils import uuid - -from 
celery.tests.case import ( - AppCase, Mock, disable_stdouts, mask_modules, patch, reset_modules, -) - -PY3 = sys.version_info[0] == 3 - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class test_CacheBackend(AppCase): - - def setup(self): - self.tb = CacheBackend(backend='memory://', app=self.app) - self.tid = uuid() - self.old_get_best_memcached = backends['memcache'] - backends['memcache'] = lambda: (DummyClient, ensure_bytes) - - def teardown(self): - backends['memcache'] = self.old_get_best_memcached - - def test_no_backend(self): - self.app.conf.CELERY_CACHE_BACKEND = None - with self.assertRaises(ImproperlyConfigured): - CacheBackend(backend=None, app=self.app) - - def test_mark_as_done(self): - self.assertEqual(self.tb.get_status(self.tid), states.PENDING) - self.assertIsNone(self.tb.get_result(self.tid)) - - self.tb.mark_as_done(self.tid, 42) - self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS) - self.assertEqual(self.tb.get_result(self.tid), 42) - - def test_is_pickled(self): - result = {'foo': 'baz', 'bar': SomeClass(12345)} - self.tb.mark_as_done(self.tid, result) - # is serialized properly. - rindb = self.tb.get_result(self.tid) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_failure(self): - try: - raise KeyError('foo') - except KeyError as exception: - self.tb.mark_as_failure(self.tid, exception) - self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) - self.assertIsInstance(self.tb.get_result(self.tid), KeyError) - - def test_apply_chord(self): - tb = CacheBackend(backend='memory://', app=self.app) - gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] - tb.apply_chord(group(app=self.app), (), gid, {}, result=res) - - @patch('celery.result.GroupResult.restore') - def test_on_chord_part_return(self, restore): - tb = CacheBackend(backend='memory://', app=self.app) - - deps = Mock() - deps.__len__ = Mock() - deps.__len__.return_value = 2 - restore.return_value = deps - task = Mock() - task.name = 'foobarbaz' - self.app.tasks['foobarbaz'] = task - task.request.chord = signature(task) - - gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] - task.request.group = gid - tb.apply_chord(group(app=self.app), (), gid, {}, result=res) - - self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task, 'SUCCESS', 10) - self.assertFalse(deps.join_native.called) - - tb.on_chord_part_return(task, 'SUCCESS', 10) - deps.join_native.assert_called_with(propagate=True, timeout=3.0) - deps.delete.assert_called_with() - - def test_mget(self): - self.tb.set('foo', 1) - self.tb.set('bar', 2) - - self.assertDictEqual(self.tb.mget(['foo', 'bar']), - {'foo': 1, 'bar': 2}) - - def test_forget(self): - self.tb.mark_as_done(self.tid, {'foo': 'bar'}) - x = self.app.AsyncResult(self.tid, backend=self.tb) - x.forget() - self.assertIsNone(x.result) - - def test_process_cleanup(self): - self.tb.process_cleanup() - - def test_expires_as_int(self): - tb = CacheBackend(backend='memory://', expires=10, app=self.app) - self.assertEqual(tb.expires, 10) - - def test_unknown_backend_raises_ImproperlyConfigured(self): - with self.assertRaises(ImproperlyConfigured): - CacheBackend(backend='unknown://', app=self.app) - - def test_as_uri_no_servers(self): - self.assertEqual(self.tb.as_uri(), 'memory:///') - - def test_as_uri_one_server(self): - backend = 'memcache://127.0.0.1:11211/' - b = CacheBackend(backend=backend, app=self.app) - self.assertEqual(b.as_uri(), 
backend) - - def test_as_uri_multiple_servers(self): - backend = 'memcache://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' - b = CacheBackend(backend=backend, app=self.app) - self.assertEqual(b.as_uri(), backend) - - @disable_stdouts - def test_regression_worker_startup_info(self): - self.app.conf.result_backend = ( - 'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' - ) - worker = self.app.Worker() - worker.on_start() - self.assertTrue(worker.startup_info()) - - -class MyMemcachedStringEncodingError(Exception): - pass - - -class MemcachedClient(DummyClient): - - def set(self, key, value, *args, **kwargs): - if PY3: - key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode' - else: - key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode' - if isinstance(key, key_t): - raise MyMemcachedStringEncodingError( - 'Keys must be {0}, not {1}. Convert your ' - 'strings using mystring.{2}(charset)!'.format( - must_be, not_be, cod)) - return super(MemcachedClient, self).set(key, value, *args, **kwargs) - - -class MockCacheMixin(object): - - @contextmanager - def mock_memcache(self): - memcache = types.ModuleType('memcache') - memcache.Client = MemcachedClient - memcache.Client.__module__ = memcache.__name__ - prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache - try: - yield True - finally: - if prev is not None: - sys.modules['memcache'] = prev - - @contextmanager - def mock_pylibmc(self): - pylibmc = types.ModuleType('pylibmc') - pylibmc.Client = MemcachedClient - pylibmc.Client.__module__ = pylibmc.__name__ - prev = sys.modules.get('pylibmc') - sys.modules['pylibmc'] = pylibmc - try: - yield True - finally: - if prev is not None: - sys.modules['pylibmc'] = prev - - -class test_get_best_memcache(AppCase, MockCacheMixin): - - def test_pylibmc(self): - with self.mock_pylibmc(): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - self.assertEqual(cache.get_best_memcache()[0].__module__, - 'pylibmc') - - def test_memcache(self): - with self.mock_memcache(): - with reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - self.assertEqual(cache.get_best_memcache()[0]().__module__, - 'memcache') - - def test_no_implementations(self): - with mask_modules('pylibmc', 'memcache'): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - with self.assertRaises(ImproperlyConfigured): - cache.get_best_memcache() - - def test_cached(self): - with self.mock_pylibmc(): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) - self.assertTrue(cache._imp[0]) - cache.get_best_memcache()[0]() - - def test_backends(self): - from celery.backends.cache import backends - with self.mock_memcache(): - for name, fun in items(backends): - self.assertTrue(fun()) - - -class test_memcache_key(AppCase, MockCacheMixin): - - def test_memcache_unicode_key(self): - with self.mock_memcache(): - with reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - task_id, result = string(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_memcache_bytes_key(self): - with self.mock_memcache(): - with 
reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_pylibmc_unicode_key(self): - with reset_modules('celery.backends.cache'): - with self.mock_pylibmc(): - from celery.backends import cache - cache._imp = [None] - task_id, result = string(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_pylibmc_bytes_key(self): - with reset_modules('celery.backends.cache'): - with self.mock_pylibmc(): - from celery.backends import cache - cache._imp = [None] - task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py deleted file mode 100644 index 1a43be9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py +++ /dev/null @@ -1,190 +0,0 @@ -from __future__ import absolute_import - -import socket - -from pickle import loads, dumps - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.tests.case import ( - AppCase, Mock, mock_module, depends_on_current_app, -) - - -class Object(object): - pass - - -def install_exceptions(mod): - # py3k: cannot catch exceptions not ineheriting from BaseException. 
- - class NotFoundException(Exception): - pass - - class TException(Exception): - pass - - class InvalidRequestException(Exception): - pass - - class UnavailableException(Exception): - pass - - class TimedOutException(Exception): - pass - - class AllServersUnavailable(Exception): - pass - - mod.NotFoundException = NotFoundException - mod.TException = TException - mod.InvalidRequestException = InvalidRequestException - mod.TimedOutException = TimedOutException - mod.UnavailableException = UnavailableException - mod.AllServersUnavailable = AllServersUnavailable - - -class test_CassandraBackend(AppCase): - - def setup(self): - self.app.conf.update( - CASSANDRA_SERVERS=['example.com'], - CASSANDRA_KEYSPACE='keyspace', - CASSANDRA_COLUMN_FAMILY='columns', - ) - - def test_init_no_pycassa(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - prev, mod.pycassa = mod.pycassa, None - try: - with self.assertRaises(ImproperlyConfigured): - mod.CassandraBackend(app=self.app) - finally: - mod.pycassa = prev - - def test_init_with_and_without_LOCAL_QUROM(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - cons = mod.pycassa.ConsistencyLevel = Object() - cons.LOCAL_QUORUM = 'foo' - - self.app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO' - self.app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO' - - mod.CassandraBackend(app=self.app) - cons.LOCAL_FOO = 'bar' - mod.CassandraBackend(app=self.app) - - # no servers raises ImproperlyConfigured - with self.assertRaises(ImproperlyConfigured): - self.app.conf.CASSANDRA_SERVERS = None - mod.CassandraBackend( - app=self.app, keyspace='b', column_family='c', - ) - - @depends_on_current_app - def test_reduce(self): - with mock_module('pycassa'): - from celery.backends.cassandra import CassandraBackend - self.assertTrue(loads(dumps(CassandraBackend(app=self.app)))) - - def test_get_task_meta_for(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - mod.Thrift = Mock() - install_exceptions(mod.Thrift) - x = mod.CassandraBackend(app=self.app) - Get_Column = x._get_column_family = Mock() - get_column = Get_Column.return_value = Mock() - get = get_column.get - META = get.return_value = { - 'task_id': 'task_id', - 'status': states.SUCCESS, - 'result': '1', - 'date_done': 'date', - 'traceback': '', - 'children': None, - } - x.decode = Mock() - x.detailed_mode = False - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.SUCCESS) - - x.detailed_mode = True - row = get.return_value = Mock() - row.values.return_value = [Mock()] - x.decode.return_value = META - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.SUCCESS) - x.decode.return_value = Mock() - - x.detailed_mode = False - get.side_effect = KeyError() - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.PENDING) - - calls = [0] - end = [10] - - def work_eventually(*arg): - try: - if calls[0] > end[0]: - return META - raise socket.error() - finally: - calls[0] += 1 - get.side_effect = work_eventually - x._retry_timeout = 10 - x._retry_wait = 0.01 - meta = x._get_task_meta_for('task') - self.assertEqual(meta['status'], states.SUCCESS) - - x._retry_timeout = 0.1 - calls[0], end[0] = 0, 100 - with self.assertRaises(socket.error): - x._get_task_meta_for('task') - - def test_store_result(self): - with mock_module('pycassa'): - 
from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - mod.Thrift = Mock() - install_exceptions(mod.Thrift) - x = mod.CassandraBackend(app=self.app) - Get_Column = x._get_column_family = Mock() - cf = Get_Column.return_value = Mock() - x.detailed_mode = False - x._store_result('task_id', 'result', states.SUCCESS) - self.assertTrue(cf.insert.called) - - cf.insert.reset() - x.detailed_mode = True - x._store_result('task_id', 'result', states.SUCCESS) - self.assertTrue(cf.insert.called) - - def test_process_cleanup(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - x = mod.CassandraBackend(app=self.app) - x._column_family = None - x.process_cleanup() - - x._column_family = True - x.process_cleanup() - self.assertIsNone(x._column_family) - - def test_get_column_family(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - x = mod.CassandraBackend(app=self.app) - self.assertTrue(x._get_column_family()) - self.assertIsNotNone(x._column_family) - self.assertIs(x._get_column_family(), x._column_family) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py deleted file mode 100644 index 3dc6aad..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py +++ /dev/null @@ -1,136 +0,0 @@ -from __future__ import absolute_import - -from celery.backends import couchbase as module -from celery.backends.couchbase import CouchBaseBackend -from celery.exceptions import ImproperlyConfigured -from celery import backends -from celery.tests.case import ( - AppCase, MagicMock, Mock, SkipTest, patch, sentinel, -) - -try: - import couchbase -except ImportError: - couchbase = None # noqa - -COUCHBASE_BUCKET = 'celery_bucket' - - -class test_CouchBaseBackend(AppCase): - - def setup(self): - if couchbase is None: - raise SkipTest('couchbase is not installed.') - self.backend = CouchBaseBackend(app=self.app) - - def test_init_no_couchbase(self): - """test init no couchbase raises""" - prev, module.couchbase = module.couchbase, None - try: - with self.assertRaises(ImproperlyConfigured): - CouchBaseBackend(app=self.app) - finally: - module.couchbase = prev - - def test_init_no_settings(self): - """test init no settings""" - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = [] - with self.assertRaises(ImproperlyConfigured): - CouchBaseBackend(app=self.app) - - def test_init_settings_is_None(self): - """Test init settings is None""" - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None - CouchBaseBackend(app=self.app) - - def test_get_connection_connection_exists(self): - with patch('couchbase.connection.Connection') as mock_Connection: - self.backend._connection = sentinel._connection - - connection = self.backend._get_connection() - - self.assertEqual(sentinel._connection, connection) - self.assertFalse(mock_Connection.called) - - def test_get(self): - """test_get - - CouchBaseBackend.get should return and take two params - db conn to couchbase is mocked. 
- TODO Should test on key not exists - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} - x = CouchBaseBackend(app=self.app) - x._connection = Mock() - mocked_get = x._connection.get = Mock() - mocked_get.return_value.value = sentinel.retval - # should return None - self.assertEqual(x.get('1f3fab'), sentinel.retval) - x._connection.get.assert_called_once_with('1f3fab') - - def test_set(self): - """test_set - - CouchBaseBackend.set should return None and take two params - db conn to couchbase is mocked. - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None - x = CouchBaseBackend(app=self.app) - x._connection = MagicMock() - x._connection.set = MagicMock() - # should return None - self.assertIsNone(x.set(sentinel.key, sentinel.value)) - - def test_delete(self): - """test_delete - - CouchBaseBackend.delete should return and take two params - db conn to couchbase is mocked. - TODO Should test on key not exists - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} - x = CouchBaseBackend(app=self.app) - x._connection = Mock() - mocked_delete = x._connection.delete = Mock() - mocked_delete.return_value = None - # should return None - self.assertIsNone(x.delete('1f3fab')) - x._connection.delete.assert_called_once_with('1f3fab') - - def test_config_params(self): - """test_config_params - - celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = { - 'bucket': 'mycoolbucket', - 'host': ['here.host.com', 'there.host.com'], - 'username': 'johndoe', - 'password': 'mysecret', - 'port': '1234', - } - x = CouchBaseBackend(app=self.app) - self.assertEqual(x.bucket, 'mycoolbucket') - self.assertEqual(x.host, ['here.host.com', 'there.host.com'],) - self.assertEqual(x.username, 'johndoe',) - self.assertEqual(x.password, 'mysecret') - self.assertEqual(x.port, 1234) - - def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): - from celery.backends.couchbase import CouchBaseBackend - backend, url_ = backends.get_backend_by_url(url, self.app.loader) - self.assertIs(backend, CouchBaseBackend) - self.assertEqual(url_, url) - - def test_backend_params_by_url(self): - url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' - with self.Celery(backend=url) as app: - x = app.backend - self.assertEqual(x.bucket, 'mycoolbucket') - self.assertEqual(x.host, 'myhost') - self.assertEqual(x.username, 'johndoe') - self.assertEqual(x.password, 'mysecret') - self.assertEqual(x.port, 123) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py deleted file mode 100644 index 6b5bf94..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py +++ /dev/null @@ -1,196 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from datetime import datetime - -from pickle import loads, dumps - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.utils import uuid - -from celery.tests.case import ( - AppCase, - SkipTest, - depends_on_current_app, - mask_modules, - skip_if_pypy, - skip_if_jython, -) - -try: - import sqlalchemy # noqa -except ImportError: - DatabaseBackend = Task = TaskSet = retry = None # noqa -else: - from celery.backends.database import DatabaseBackend, retry - from celery.backends.database.models import Task, TaskSet - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class 
test_DatabaseBackend(AppCase): - - @skip_if_pypy - @skip_if_jython - def setup(self): - if DatabaseBackend is None: - raise SkipTest('sqlalchemy not installed') - self.uri = 'sqlite:///test.db' - - def test_retry_helper(self): - from celery.backends.database import DatabaseError - - calls = [0] - - @retry - def raises(): - calls[0] += 1 - raise DatabaseError(1, 2, 3) - - with self.assertRaises(DatabaseError): - raises(max_retries=5) - self.assertEqual(calls[0], 5) - - def test_missing_SQLAlchemy_raises_ImproperlyConfigured(self): - with mask_modules('sqlalchemy'): - from celery.backends.database import _sqlalchemy_installed - with self.assertRaises(ImproperlyConfigured): - _sqlalchemy_installed() - - def test_missing_dburi_raises_ImproperlyConfigured(self): - self.app.conf.CELERY_RESULT_DBURI = None - with self.assertRaises(ImproperlyConfigured): - DatabaseBackend(app=self.app) - - def test_missing_task_id_is_PENDING(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertEqual(tb.get_status('xxx-does-not-exist'), states.PENDING) - - def test_missing_task_meta_is_dict_with_pending(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertDictContainsSubset({ - 'status': states.PENDING, - 'task_id': 'xxx-does-not-exist-at-all', - 'result': None, - 'traceback': None - }, tb.get_task_meta('xxx-does-not-exist-at-all')) - - def test_mark_as_done(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid = uuid() - - self.assertEqual(tb.get_status(tid), states.PENDING) - self.assertIsNone(tb.get_result(tid)) - - tb.mark_as_done(tid, 42) - self.assertEqual(tb.get_status(tid), states.SUCCESS) - self.assertEqual(tb.get_result(tid), 42) - - def test_is_pickled(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid2 = uuid() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - tb.mark_as_done(tid2, result) - # is serialized properly. 
- rindb = tb.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_started(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - tb.mark_as_started(tid) - self.assertEqual(tb.get_status(tid), states.STARTED) - - def test_mark_as_revoked(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - tb.mark_as_revoked(tid) - self.assertEqual(tb.get_status(tid), states.REVOKED) - - def test_mark_as_retry(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - import traceback - trace = '\n'.join(traceback.format_stack()) - tb.mark_as_retry(tid, exception, traceback=trace) - self.assertEqual(tb.get_status(tid), states.RETRY) - self.assertIsInstance(tb.get_result(tid), KeyError) - self.assertEqual(tb.get_traceback(tid), trace) - - def test_mark_as_failure(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid3 = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - import traceback - trace = '\n'.join(traceback.format_stack()) - tb.mark_as_failure(tid3, exception, traceback=trace) - self.assertEqual(tb.get_status(tid3), states.FAILURE) - self.assertIsInstance(tb.get_result(tid3), KeyError) - self.assertEqual(tb.get_traceback(tid3), trace) - - def test_forget(self): - tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) - tid = uuid() - tb.mark_as_done(tid, {'foo': 'bar'}) - tb.mark_as_done(tid, {'foo': 'bar'}) - x = self.app.AsyncResult(tid, backend=tb) - x.forget() - self.assertIsNone(x.result) - - def test_process_cleanup(self): - tb = DatabaseBackend(self.uri, app=self.app) - tb.process_cleanup() - - @depends_on_current_app - def test_reduce(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertTrue(loads(dumps(tb))) - - def test_save__restore__delete_group(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid = uuid() - res = {'something': 'special'} - self.assertEqual(tb.save_group(tid, res), res) - - res2 = tb.restore_group(tid) - self.assertEqual(res2, res) - - tb.delete_group(tid) - self.assertIsNone(tb.restore_group(tid)) - - self.assertIsNone(tb.restore_group('xxx-nonexisting-id')) - - def test_cleanup(self): - tb = DatabaseBackend(self.uri, app=self.app) - for i in range(10): - tb.mark_as_done(uuid(), 42) - tb.save_group(uuid(), {'foo': 'bar'}) - s = tb.ResultSession() - for t in s.query(Task).all(): - t.date_done = datetime.now() - tb.expires * 2 - for t in s.query(TaskSet).all(): - t.date_done = datetime.now() - tb.expires * 2 - s.commit() - s.close() - - tb.cleanup() - - def test_Task__repr__(self): - self.assertIn('foo', repr(Task('foo'))) - - def test_TaskSet__repr__(self): - self.assertIn('foo', repr(TaskSet('foo', None))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py deleted file mode 100644 index bce429f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py +++ /dev/null @@ -1,366 +0,0 @@ -from __future__ import absolute_import - -import datetime -import uuid - -from pickle import loads, dumps - -from celery import states -from celery.backends import mongodb as module -from celery.backends.mongodb import MongoBackend, pymongo -from celery.exceptions import ImproperlyConfigured -from celery.tests.case import ( - AppCase, MagicMock, Mock, SkipTest, ANY, - depends_on_current_app, 
disable_stdouts, patch, sentinel, -) - -COLLECTION = 'taskmeta_celery' -TASK_ID = str(uuid.uuid1()) -MONGODB_HOST = 'localhost' -MONGODB_PORT = 27017 -MONGODB_USER = 'mongo' -MONGODB_PASSWORD = '1234' -MONGODB_DATABASE = 'testing' -MONGODB_COLLECTION = 'collection1' - - -class test_MongoBackend(AppCase): - - default_url = 'mongodb://uuuu:pwpw@hostname.dom/database' - replica_set_url = ( - 'mongodb://uuuu:pwpw@hostname.dom,' - 'hostname.dom/database?replicaSet=rs' - ) - sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database' - sanitized_replica_set_url = ( - 'mongodb://uuuu:**@hostname.dom/,' - 'hostname.dom/database?replicaSet=rs' - ) - - def setup(self): - if pymongo is None: - raise SkipTest('pymongo is not installed.') - - R = self._reset = {} - R['encode'], MongoBackend.encode = MongoBackend.encode, Mock() - R['decode'], MongoBackend.decode = MongoBackend.decode, Mock() - R['Binary'], module.Binary = module.Binary, Mock() - R['datetime'], datetime.datetime = datetime.datetime, Mock() - - self.backend = MongoBackend(app=self.app, url=self.default_url) - - def teardown(self): - MongoBackend.encode = self._reset['encode'] - MongoBackend.decode = self._reset['decode'] - module.Binary = self._reset['Binary'] - datetime.datetime = self._reset['datetime'] - - def test_init_no_mongodb(self): - prev, module.pymongo = module.pymongo, None - try: - with self.assertRaises(ImproperlyConfigured): - MongoBackend(app=self.app) - finally: - module.pymongo = prev - - def test_init_no_settings(self): - self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = [] - with self.assertRaises(ImproperlyConfigured): - MongoBackend(app=self.app) - - def test_init_settings_is_None(self): - self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = None - MongoBackend(app=self.app) - - def test_restore_group_no_entry(self): - x = MongoBackend(app=self.app) - x.collection = Mock() - fo = x.collection.find_one = Mock() - fo.return_value = None - self.assertIsNone(x._restore_group('1f3fab')) - - @depends_on_current_app - def test_reduce(self): - x = MongoBackend(app=self.app) - self.assertTrue(loads(dumps(x))) - - def test_get_connection_connection_exists(self): - - with patch('pymongo.MongoClient') as mock_Connection: - self.backend._connection = sentinel._connection - - connection = self.backend._get_connection() - - self.assertEqual(sentinel._connection, connection) - self.assertFalse(mock_Connection.called) - - def test_get_connection_no_connection_host(self): - - with patch('pymongo.MongoClient') as mock_Connection: - self.backend._connection = None - self.backend.host = MONGODB_HOST - self.backend.port = MONGODB_PORT - mock_Connection.return_value = sentinel.connection - - connection = self.backend._get_connection() - mock_Connection.assert_called_once_with( - host='mongodb://localhost:27017', - **self.backend._prepare_client_options() - ) - self.assertEqual(sentinel.connection, connection) - - def test_get_connection_no_connection_mongodb_uri(self): - - with patch('pymongo.MongoClient') as mock_Connection: - mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) - self.backend._connection = None - self.backend.host = mongodb_uri - - mock_Connection.return_value = sentinel.connection - - connection = self.backend._get_connection() - mock_Connection.assert_called_once_with( - host=mongodb_uri, **self.backend._prepare_client_options() - ) - self.assertEqual(sentinel.connection, connection) - - @patch('celery.backends.mongodb.MongoBackend._get_connection') - def test_get_database_no_existing(self, 
mock_get_connection): - # Should really check for combinations of these two, to be complete. - self.backend.user = MONGODB_USER - self.backend.password = MONGODB_PASSWORD - - mock_database = Mock() - mock_connection = MagicMock(spec=['__getitem__']) - mock_connection.__getitem__.return_value = mock_database - mock_get_connection.return_value = mock_connection - - database = self.backend.database - - self.assertTrue(database is mock_database) - self.assertTrue(self.backend.__dict__['database'] is mock_database) - mock_database.authenticate.assert_called_once_with( - MONGODB_USER, MONGODB_PASSWORD) - - @patch('celery.backends.mongodb.MongoBackend._get_connection') - def test_get_database_no_existing_no_auth(self, mock_get_connection): - # Should really check for combinations of these two, to be complete. - self.backend.user = None - self.backend.password = None - - mock_database = Mock() - mock_connection = MagicMock(spec=['__getitem__']) - mock_connection.__getitem__.return_value = mock_database - mock_get_connection.return_value = mock_connection - - database = self.backend.database - - self.assertTrue(database is mock_database) - self.assertFalse(mock_database.authenticate.called) - self.assertTrue(self.backend.__dict__['database'] is mock_database) - - def test_process_cleanup(self): - self.backend._connection = None - self.backend.process_cleanup() - self.assertEqual(self.backend._connection, None) - - self.backend._connection = 'not none' - self.backend.process_cleanup() - self.assertEqual(self.backend._connection, None) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_store_result(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._store_result( - sentinel.task_id, sentinel.result, sentinel.status) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once_with(ANY) - self.assertEqual(sentinel.result, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_get_task_meta_for(self, mock_get_database): - datetime.datetime = self._reset['datetime'] - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = MagicMock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._get_task_meta_for(sentinel.task_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEqual( - list(sorted(['status', 'task_id', 'date_done', 'traceback', - 'result', 'children'])), - list(sorted(ret_val.keys())), - ) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_get_task_meta_for_no_result(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = None - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._get_task_meta_for(sentinel.task_id) - - 
mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEqual({'status': states.PENDING, 'result': None}, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_save_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._save_group( - sentinel.taskset_id, sentinel.result) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once_with(ANY) - self.assertEqual(sentinel.result, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_restore_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = MagicMock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._restore_group(sentinel.taskset_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.find_one.assert_called_once_with( - {'_id': sentinel.taskset_id}) - self.assertItemsEqual( - ['date_done', 'result', 'task_id'], - list(ret_val.keys()), - ) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_delete_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend._delete_group(sentinel.taskset_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.remove.assert_called_once_with( - {'_id': sentinel.taskset_id}) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_forget(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend._forget(sentinel.task_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with( - MONGODB_COLLECTION) - mock_collection.remove.assert_called_once_with( - {'_id': sentinel.task_id}) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_cleanup(self, mock_get_database): - datetime.datetime = self._reset['datetime'] - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - self.backend.collections = mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend.app.now = datetime.datetime.utcnow - self.backend.cleanup() - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with( - MONGODB_COLLECTION) - 
self.assertTrue(mock_collection.remove.called) - - def test_get_database_authfailure(self): - x = MongoBackend(app=self.app) - x._get_connection = Mock() - conn = x._get_connection.return_value = {} - db = conn[x.database_name] = Mock() - db.authenticate.return_value = False - x.user = 'jerry' - x.password = 'cere4l' - with self.assertRaises(ImproperlyConfigured): - x._get_database() - db.authenticate.assert_called_with('jerry', 'cere4l') - - @patch('celery.backends.mongodb.detect_environment') - def test_prepare_client_options_for_ver_2(self, m_detect_env): - m_detect_env.return_value = 'default' - with patch('pymongo.version_tuple', new=(2, 6, 3)): - options = self.backend._prepare_client_options() - self.assertDictEqual(options, { - 'max_pool_size': self.backend.max_pool_size, - 'auto_start_request': False - }) - - def test_as_uri_include_password(self): - self.assertEqual(self.backend.as_uri(True), self.default_url) - - def test_as_uri_exclude_password(self): - self.assertEqual(self.backend.as_uri(), self.sanitized_default_url) - - def test_as_uri_include_password_replica_set(self): - backend = MongoBackend(app=self.app, url=self.replica_set_url) - self.assertEqual(backend.as_uri(True), self.replica_set_url) - - def test_as_uri_exclude_password_replica_set(self): - backend = MongoBackend(app=self.app, url=self.replica_set_url) - self.assertEqual(backend.as_uri(), self.sanitized_replica_set_url) - - @disable_stdouts - def test_regression_worker_startup_info(self): - self.app.conf.result_backend = ( - 'mongodb://user:password@host0.com:43437,host1.com:43437' - '/work4us?replicaSet=rs&ssl=true' - ) - worker = self.app.Worker() - worker.on_start() - self.assertTrue(worker.startup_info()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py deleted file mode 100644 index a0de4b7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py +++ /dev/null @@ -1,282 +0,0 @@ -from __future__ import absolute_import - -from datetime import timedelta - -from pickle import loads, dumps - -from celery import signature -from celery import states -from celery import group -from celery import uuid -from celery.datastructures import AttributeDict -from celery.exceptions import ImproperlyConfigured -from celery.utils.timeutils import timedelta_seconds - -from celery.tests.case import ( - AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch, -) - - -class Connection(object): - connected = True - - def disconnect(self): - self.connected = False - - -class Pipeline(object): - - def __init__(self, client): - self.client = client - self.steps = [] - - def __getattr__(self, attr): - - def add_step(*args, **kwargs): - self.steps.append((getattr(self.client, attr), args, kwargs)) - return self - return add_step - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - pass - - def execute(self): - return [step(*a, **kw) for step, a, kw in self.steps] - - -class Redis(MockCallbacks): - Connection = Connection - Pipeline = Pipeline - - def __init__(self, host=None, port=None, db=None, password=None, **kw): - self.host = host - self.port = port - self.db = db - self.password = password - self.keyspace = {} - self.expiry = {} - self.connection = self.Connection() - - def get(self, key): - return self.keyspace.get(key) - - def setex(self, key, value, expires): - self.set(key, value) - self.expire(key, expires) - - def set(self, key, 
value): - self.keyspace[key] = value - - def expire(self, key, expires): - self.expiry[key] = expires - return expires - - def delete(self, key): - return bool(self.keyspace.pop(key, None)) - - def pipeline(self): - return self.Pipeline(self) - - def _get_list(self, key): - try: - return self.keyspace[key] - except KeyError: - l = self.keyspace[key] = [] - return l - - def rpush(self, key, value): - self._get_list(key).append(value) - - def lrange(self, key, start, stop): - return self._get_list(key)[start:stop] - - def llen(self, key): - return len(self.keyspace.get(key) or []) - - -class redis(object): - VERSION = (2, 4, 10) - Redis = Redis - - class ConnectionPool(object): - - def __init__(self, **kwargs): - pass - - class UnixDomainSocketConnection(object): - - def __init__(self, **kwargs): - pass - - -class test_RedisBackend(AppCase): - - def get_backend(self): - from celery.backends.redis import RedisBackend - - class _RedisBackend(RedisBackend): - redis = redis - - return _RedisBackend - - def setup(self): - self.Backend = self.get_backend() - - @depends_on_current_app - def test_reduce(self): - try: - from celery.backends.redis import RedisBackend - x = RedisBackend(app=self.app, new_join=True) - self.assertTrue(loads(dumps(x))) - except ImportError: - raise SkipTest('redis not installed') - - def test_no_redis(self): - self.Backend.redis = None - with self.assertRaises(ImproperlyConfigured): - self.Backend(app=self.app, new_join=True) - - def test_url(self): - x = self.Backend( - 'redis://:bosco@vandelay.com:123//1', app=self.app, - new_join=True, - ) - self.assertTrue(x.connparams) - self.assertEqual(x.connparams['host'], 'vandelay.com') - self.assertEqual(x.connparams['db'], 1) - self.assertEqual(x.connparams['port'], 123) - self.assertEqual(x.connparams['password'], 'bosco') - - def test_socket_url(self): - x = self.Backend( - 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, - new_join=True, - ) - self.assertTrue(x.connparams) - self.assertEqual(x.connparams['path'], '/tmp/redis.sock') - self.assertIs( - x.connparams['connection_class'], - redis.UnixDomainSocketConnection, - ) - self.assertNotIn('host', x.connparams) - self.assertNotIn('port', x.connparams) - self.assertEqual(x.connparams['db'], 3) - - def test_compat_propertie(self): - x = self.Backend( - 'redis://:bosco@vandelay.com:123//1', app=self.app, - new_join=True, - ) - with self.assertPendingDeprecation(): - self.assertEqual(x.host, 'vandelay.com') - with self.assertPendingDeprecation(): - self.assertEqual(x.db, 1) - with self.assertPendingDeprecation(): - self.assertEqual(x.port, 123) - with self.assertPendingDeprecation(): - self.assertEqual(x.password, 'bosco') - - def test_conf_raises_KeyError(self): - self.app.conf = AttributeDict({ - 'CELERY_RESULT_SERIALIZER': 'json', - 'CELERY_MAX_CACHED_RESULTS': 1, - 'CELERY_ACCEPT_CONTENT': ['json'], - 'CELERY_TASK_RESULT_EXPIRES': None, - }) - self.Backend(app=self.app, new_join=True) - - def test_expires_defaults_to_config(self): - self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10 - b = self.Backend(expires=None, app=self.app, new_join=True) - self.assertEqual(b.expires, 10) - - def test_expires_is_int(self): - b = self.Backend(expires=48, app=self.app, new_join=True) - self.assertEqual(b.expires, 48) - - def test_set_new_join_from_url_query(self): - b = self.Backend('redis://?new_join=True;foobar=1', app=self.app) - self.assertEqual(b.on_chord_part_return, b._new_chord_return) - self.assertEqual(b.apply_chord, b._new_chord_apply) - - def 
test_default_is_old_join(self): - b = self.Backend(app=self.app) - self.assertNotEqual(b.on_chord_part_return, b._new_chord_return) - self.assertNotEqual(b.apply_chord, b._new_chord_apply) - - def test_expires_is_None(self): - b = self.Backend(expires=None, app=self.app, new_join=True) - self.assertEqual(b.expires, timedelta_seconds( - self.app.conf.CELERY_TASK_RESULT_EXPIRES)) - - def test_expires_is_timedelta(self): - b = self.Backend( - expires=timedelta(minutes=1), app=self.app, new_join=1, - ) - self.assertEqual(b.expires, 60) - - def test_apply_chord(self): - self.Backend(app=self.app, new_join=True).apply_chord( - group(app=self.app), (), 'group_id', {}, - result=[self.app.AsyncResult(x) for x in [1, 2, 3]], - ) - - def test_mget(self): - b = self.Backend(app=self.app, new_join=True) - self.assertTrue(b.mget(['a', 'b', 'c'])) - b.client.mget.assert_called_with(['a', 'b', 'c']) - - def test_set_no_expire(self): - b = self.Backend(app=self.app, new_join=True) - b.expires = None - b.set('foo', 'bar') - - @patch('celery.result.GroupResult.restore') - def test_on_chord_part_return(self, restore): - b = self.Backend(app=self.app, new_join=True) - - def create_task(): - tid = uuid() - task = Mock(name='task-{0}'.format(tid)) - task.name = 'foobarbaz' - self.app.tasks['foobarbaz'] = task - task.request.chord = signature(task) - task.request.id = tid - task.request.chord['chord_size'] = 10 - task.request.group = 'group_id' - return task - - tasks = [create_task() for i in range(10)] - - for i in range(10): - b.on_chord_part_return(tasks[i], states.SUCCESS, i) - self.assertTrue(b.client.rpush.call_count) - b.client.rpush.reset_mock() - self.assertTrue(b.client.lrange.call_count) - gkey = b.get_key_for_group('group_id', '.j') - b.client.delete.assert_called_with(gkey) - b.client.expire.assert_called_with(gkey, 86400) - - def test_process_cleanup(self): - self.Backend(app=self.app, new_join=True).process_cleanup() - - def test_get_set_forget(self): - b = self.Backend(app=self.app, new_join=True) - tid = uuid() - b.store_result(tid, 42, states.SUCCESS) - self.assertEqual(b.get_status(tid), states.SUCCESS) - self.assertEqual(b.get_result(tid), 42) - b.forget(tid) - self.assertEqual(b.get_status(tid), states.PENDING) - - def test_set_expires(self): - b = self.Backend(expires=512, app=self.app, new_join=True) - tid = uuid() - key = b.get_key_for_task(tid) - b.store_result(tid, 42, states.SUCCESS) - b.client.expire.assert_called_with( - key, 512, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py deleted file mode 100644 index 6fe594c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import absolute_import - -from celery.backends.rpc import RPCBackend -from celery._state import _task_stack - -from celery.tests.case import AppCase, Mock, patch - - -class test_RPCBackend(AppCase): - - def setup(self): - self.b = RPCBackend(app=self.app) - - def test_oid(self): - oid = self.b.oid - oid2 = self.b.oid - self.assertEqual(oid, oid2) - self.assertEqual(oid, self.app.oid) - - def test_interface(self): - self.b.on_reply_declare('task_id') - - def test_destination_for(self): - req = Mock(name='request') - req.reply_to = 'reply_to' - req.correlation_id = 'corid' - self.assertTupleEqual( - self.b.destination_for('task_id', req), - ('reply_to', 'corid'), - ) - task = Mock() - _task_stack.push(task) - try: - 
task.request.reply_to = 'reply_to' - task.request.correlation_id = 'corid' - self.assertTupleEqual( - self.b.destination_for('task_id', None), - ('reply_to', 'corid'), - ) - finally: - _task_stack.pop() - - with self.assertRaises(RuntimeError): - self.b.destination_for('task_id', None) - - def test_binding(self): - queue = self.b.binding - self.assertEqual(queue.name, self.b.oid) - self.assertEqual(queue.exchange, self.b.exchange) - self.assertEqual(queue.routing_key, self.b.oid) - self.assertFalse(queue.durable) - self.assertFalse(queue.auto_delete) - - def test_many_bindings(self): - self.assertListEqual( - self.b._many_bindings(['a', 'b']), - [self.b.binding], - ) - - def test_create_binding(self): - self.assertEqual(self.b._create_binding('id'), self.b.binding) - - def test_on_task_call(self): - with patch('celery.backends.rpc.maybe_declare') as md: - with self.app.amqp.producer_pool.acquire() as prod: - self.b.on_task_call(prod, 'task_id'), - md.assert_called_with( - self.b.binding(prod.channel), - retry=True, - ) - - def test_create_exchange(self): - ex = self.b._create_exchange('name') - self.assertIsInstance(ex, self.b.Exchange) - self.assertEqual(ex.name, '') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py deleted file mode 100644 index ffe8fb0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from celery import Celery - -hello = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py deleted file mode 100644 index f1fb15e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from celery import Celery - -app = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py deleted file mode 100644 index 8840a9f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import absolute_import - -from celery.bin.amqp import ( - AMQPAdmin, - AMQShell, - dump_message, - amqp, - main, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, patch - - -class test_AMQShell(AppCase): - - def setup(self): - self.fh = WhateverIO() - self.adm = self.create_adm() - self.shell = AMQShell(connect=self.adm.connect, out=self.fh) - - def create_adm(self, *args, **kwargs): - return AMQPAdmin(app=self.app, out=self.fh, *args, **kwargs) - - def test_queue_declare(self): - self.shell.onecmd('queue.declare foo') - self.assertIn('ok', self.fh.getvalue()) - - def test_missing_command(self): - self.shell.onecmd('foo foo') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def RV(self): - raise Exception(self.fh.getvalue()) - - def test_spec_format_response(self): - spec = self.shell.amqp['exchange.declare'] - self.assertEqual(spec.format_response(None), 'ok.') - self.assertEqual(spec.format_response('NO'), 'NO') - - def test_missing_namespace(self): - self.shell.onecmd('ns.cmd 
arg') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def test_help(self): - self.shell.onecmd('help') - self.assertIn('Example:', self.fh.getvalue()) - - def test_help_command(self): - self.shell.onecmd('help queue.declare') - self.assertIn('passive:no', self.fh.getvalue()) - - def test_help_unknown_command(self): - self.shell.onecmd('help foo.baz') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def test_onecmd_error(self): - self.shell.dispatch = Mock() - self.shell.dispatch.side_effect = MemoryError() - self.shell.say = Mock() - self.assertFalse(self.shell.needs_reconnect) - self.shell.onecmd('hello') - self.assertTrue(self.shell.say.called) - self.assertTrue(self.shell.needs_reconnect) - - def test_exit(self): - with self.assertRaises(SystemExit): - self.shell.onecmd('exit') - self.assertIn("don't leave!", self.fh.getvalue()) - - def test_note_silent(self): - self.shell.silent = True - self.shell.note('foo bar') - self.assertNotIn('foo bar', self.fh.getvalue()) - - def test_reconnect(self): - self.shell.onecmd('queue.declare foo') - self.shell.needs_reconnect = True - self.shell.onecmd('queue.delete foo') - - def test_completenames(self): - self.assertEqual( - self.shell.completenames('queue.dec'), - ['queue.declare'], - ) - self.assertEqual( - sorted(self.shell.completenames('declare')), - sorted(['queue.declare', 'exchange.declare']), - ) - - def test_empty_line(self): - self.shell.emptyline = Mock() - self.shell.default = Mock() - self.shell.onecmd('') - self.shell.emptyline.assert_called_with() - self.shell.onecmd('foo') - self.shell.default.assert_called_with('foo') - - def test_respond(self): - self.shell.respond({'foo': 'bar'}) - self.assertIn('foo', self.fh.getvalue()) - - def test_prompt(self): - self.assertTrue(self.shell.prompt) - - def test_no_returns(self): - self.shell.onecmd('queue.declare foo') - self.shell.onecmd('exchange.declare bar direct yes') - self.shell.onecmd('queue.bind foo bar baz') - self.shell.onecmd('basic.ack 1') - - def test_dump_message(self): - m = Mock() - m.body = 'the quick brown fox' - m.properties = {'a': 1} - m.delivery_info = {'exchange': 'bar'} - self.assertTrue(dump_message(m)) - - def test_dump_message_no_message(self): - self.assertIn('No messages in queue', dump_message(None)) - - def test_note(self): - self.adm.silent = True - self.adm.note('FOO') - self.assertNotIn('FOO', self.fh.getvalue()) - - def test_run(self): - a = self.create_adm('queue.declare foo') - a.run() - self.assertIn('ok', self.fh.getvalue()) - - def test_run_loop(self): - a = self.create_adm() - a.Shell = Mock() - shell = a.Shell.return_value = Mock() - shell.cmdloop = Mock() - a.run() - shell.cmdloop.assert_called_with() - - shell.cmdloop.side_effect = KeyboardInterrupt() - a.run() - self.assertIn('bibi', self.fh.getvalue()) - - @patch('celery.bin.amqp.amqp') - def test_main(self, Command): - c = Command.return_value = Mock() - main() - c.execute_from_commandline.assert_called_with() - - @patch('celery.bin.amqp.AMQPAdmin') - def test_command(self, cls): - x = amqp(app=self.app) - x.run() - self.assertIs(cls.call_args[1]['app'], self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py deleted file mode 100644 index 61d56fe..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py +++ /dev/null @@ -1,332 +0,0 @@ -from __future__ import absolute_import - -import os - -from celery.bin.base import ( - Command, - Option, - 
Extensions, - HelpFormatter, -) -from celery.tests.case import ( - AppCase, Mock, depends_on_current_app, override_stdouts, patch, -) - - -class Object(object): - pass - - -class MyApp(object): - user_options = {'preload': None} - -APP = MyApp() # <-- Used by test_with_custom_app - - -class MockCommand(Command): - mock_args = ('arg1', 'arg2', 'arg3') - - def parse_options(self, prog_name, arguments, command=None): - options = Object() - options.foo = 'bar' - options.prog_name = prog_name - return options, self.mock_args - - def run(self, *args, **kwargs): - return args, kwargs - - -class test_Extensions(AppCase): - - def test_load(self): - with patch('pkg_resources.iter_entry_points') as iterep: - with patch('celery.bin.base.symbol_by_name') as symbyname: - ep = Mock() - ep.name = 'ep' - ep.module_name = 'foo' - ep.attrs = ['bar', 'baz'] - iterep.return_value = [ep] - cls = symbyname.return_value = Mock() - register = Mock() - e = Extensions('unit', register) - e.load() - symbyname.assert_called_with('foo:bar') - register.assert_called_with(cls, name='ep') - - with patch('celery.bin.base.symbol_by_name') as symbyname: - symbyname.side_effect = SyntaxError() - with patch('warnings.warn') as warn: - e.load() - self.assertTrue(warn.called) - - with patch('celery.bin.base.symbol_by_name') as symbyname: - symbyname.side_effect = KeyError('foo') - with self.assertRaises(KeyError): - e.load() - - -class test_HelpFormatter(AppCase): - - def test_format_epilog(self): - f = HelpFormatter() - self.assertTrue(f.format_epilog('hello')) - self.assertFalse(f.format_epilog('')) - - def test_format_description(self): - f = HelpFormatter() - self.assertTrue(f.format_description('hello')) - - -class test_Command(AppCase): - - def test_get_options(self): - cmd = Command() - cmd.option_list = (1, 2, 3) - self.assertTupleEqual(cmd.get_options(), (1, 2, 3)) - - def test_custom_description(self): - - class C(Command): - description = 'foo' - - c = C() - self.assertEqual(c.description, 'foo') - - def test_register_callbacks(self): - c = Command(on_error=8, on_usage_error=9) - self.assertEqual(c.on_error, 8) - self.assertEqual(c.on_usage_error, 9) - - def test_run_raises_UsageError(self): - cb = Mock() - c = Command(on_usage_error=cb) - c.verify_args = Mock() - c.run = Mock() - exc = c.run.side_effect = c.UsageError('foo', status=3) - - self.assertEqual(c(), exc.status) - cb.assert_called_with(exc) - c.verify_args.assert_called_with(()) - - def test_default_on_usage_error(self): - cmd = Command() - cmd.handle_error = Mock() - exc = Exception() - cmd.on_usage_error(exc) - cmd.handle_error.assert_called_with(exc) - - def test_verify_args_missing(self): - c = Command() - - def run(a, b, c): - pass - c.run = run - - with self.assertRaises(c.UsageError): - c.verify_args((1, )) - c.verify_args((1, 2, 3)) - - def test_run_interface(self): - with self.assertRaises(NotImplementedError): - Command().run() - - @patch('sys.stdout') - def test_early_version(self, stdout): - cmd = Command() - with self.assertRaises(SystemExit): - cmd.early_version(['--version']) - - def test_execute_from_commandline(self): - cmd = MockCommand(app=self.app) - args1, kwargs1 = cmd.execute_from_commandline() # sys.argv - self.assertTupleEqual(args1, cmd.mock_args) - self.assertDictContainsSubset({'foo': 'bar'}, kwargs1) - self.assertTrue(kwargs1.get('prog_name')) - args2, kwargs2 = cmd.execute_from_commandline(['foo']) # pass list - self.assertTupleEqual(args2, cmd.mock_args) - self.assertDictContainsSubset({'foo': 'bar', 'prog_name': 'foo'}, - 
kwargs2) - - def test_with_bogus_args(self): - with override_stdouts() as (_, stderr): - cmd = MockCommand(app=self.app) - cmd.supports_args = False - with self.assertRaises(SystemExit): - cmd.execute_from_commandline(argv=['--bogus']) - self.assertTrue(stderr.getvalue()) - self.assertIn('Unrecognized', stderr.getvalue()) - - def test_with_custom_config_module(self): - prev = os.environ.pop('CELERY_CONFIG_MODULE', None) - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--config=foo.bar.baz']) - self.assertEqual(os.environ.get('CELERY_CONFIG_MODULE'), - 'foo.bar.baz') - finally: - if prev: - os.environ['CELERY_CONFIG_MODULE'] = prev - else: - os.environ.pop('CELERY_CONFIG_MODULE', None) - - def test_with_custom_broker(self): - prev = os.environ.pop('CELERY_BROKER_URL', None) - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--broker=xyzza://']) - self.assertEqual( - os.environ.get('CELERY_BROKER_URL'), 'xyzza://', - ) - finally: - if prev: - os.environ['CELERY_BROKER_URL'] = prev - else: - os.environ.pop('CELERY_BROKER_URL', None) - - def test_with_custom_app(self): - cmd = MockCommand(app=self.app) - app = '.'.join([__name__, 'APP']) - cmd.setup_app_from_commandline(['--app=%s' % (app, ), - '--loglevel=INFO']) - self.assertIs(cmd.app, APP) - cmd.setup_app_from_commandline(['-A', app, - '--loglevel=INFO']) - self.assertIs(cmd.app, APP) - - def test_setup_app_sets_quiet(self): - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['-q']) - self.assertTrue(cmd.quiet) - cmd2 = MockCommand(app=self.app) - cmd2.setup_app_from_commandline(['--quiet']) - self.assertTrue(cmd2.quiet) - - def test_setup_app_sets_chdir(self): - with patch('os.chdir') as chdir: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--workdir=/opt']) - chdir.assert_called_with('/opt') - - def test_setup_app_sets_loader(self): - prev = os.environ.get('CELERY_LOADER') - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--loader=X.Y:Z']) - self.assertEqual(os.environ['CELERY_LOADER'], 'X.Y:Z') - finally: - if prev is not None: - os.environ['CELERY_LOADER'] = prev - - def test_setup_app_no_respect(self): - cmd = MockCommand(app=self.app) - cmd.respects_app_option = False - with patch('celery.bin.base.Celery') as cp: - cmd.setup_app_from_commandline(['--app=x.y:z']) - self.assertTrue(cp.called) - - def test_setup_app_custom_app(self): - cmd = MockCommand(app=self.app) - app = cmd.app = Mock() - app.user_options = {'preload': None} - cmd.setup_app_from_commandline([]) - self.assertEqual(cmd.app, app) - - def test_find_app_suspects(self): - cmd = MockCommand(app=self.app) - self.assertTrue(cmd.find_app('celery.tests.bin.proj.app')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj:hello')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj.app:app')) - - with self.assertRaises(AttributeError): - cmd.find_app(__name__) - - def test_host_format(self): - cmd = MockCommand(app=self.app) - with patch('socket.gethostname') as hn: - hn.return_value = 'blacktron.example.com' - self.assertEqual(cmd.host_format(''), '') - self.assertEqual( - cmd.host_format('celery@%h'), - 'celery@blacktron.example.com', - ) - self.assertEqual( - cmd.host_format('celery@%d'), - 'celery@example.com', - ) - self.assertEqual( - cmd.host_format('celery@%n'), - 'celery@blacktron', - ) - - def test_say_chat_quiet(self): - cmd = MockCommand(app=self.app) - cmd.quiet = True - 
self.assertIsNone(cmd.say_chat('<-', 'foo', 'foo')) - - def test_say_chat_show_body(self): - cmd = MockCommand(app=self.app) - cmd.out = Mock() - cmd.show_body = True - cmd.say_chat('->', 'foo', 'body') - cmd.out.assert_called_with('body') - - def test_say_chat_no_body(self): - cmd = MockCommand(app=self.app) - cmd.out = Mock() - cmd.show_body = False - cmd.say_chat('->', 'foo', 'body') - - @depends_on_current_app - def test_with_cmdline_config(self): - cmd = MockCommand(app=self.app) - cmd.enable_config_from_cmdline = True - cmd.namespace = 'celeryd' - rest = cmd.setup_app_from_commandline(argv=[ - '--loglevel=INFO', '--', - 'broker.url=amqp://broker.example.com', - '.prefetch_multiplier=100']) - self.assertEqual(cmd.app.conf.BROKER_URL, - 'amqp://broker.example.com') - self.assertEqual(cmd.app.conf.CELERYD_PREFETCH_MULTIPLIER, 100) - self.assertListEqual(rest, ['--loglevel=INFO']) - - def test_find_app(self): - cmd = MockCommand(app=self.app) - with patch('celery.bin.base.symbol_by_name') as sbn: - from types import ModuleType - x = ModuleType('proj') - - def on_sbn(*args, **kwargs): - - def after(*args, **kwargs): - x.app = 'quick brown fox' - x.__path__ = None - return x - sbn.side_effect = after - return x - sbn.side_effect = on_sbn - x.__path__ = [True] - self.assertEqual(cmd.find_app('proj'), 'quick brown fox') - - def test_parse_preload_options_shortopt(self): - cmd = Command() - cmd.preload_options = (Option('-s', action='store', dest='silent'), ) - acc = cmd.parse_preload_options(['-s', 'yes']) - self.assertEqual(acc.get('silent'), 'yes') - - def test_parse_preload_options_with_equals_and_append(self): - cmd = Command() - opt = Option('--zoom', action='append', default=[]) - cmd.preload_options = (opt,) - acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2']) - - self.assertEqual(acc, {'zoom': ['1', '2']}) - - def test_parse_preload_options_without_equals_and_append(self): - cmd = Command() - opt = Option('--zoom', action='append', default=[]) - cmd.preload_options = (opt,) - acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2']) - - self.assertEqual(acc, {'zoom': ['1', '2']}) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py deleted file mode 100644 index 45a7438..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py +++ /dev/null @@ -1,196 +0,0 @@ -from __future__ import absolute_import - -import logging -import sys - -from collections import defaultdict - -from celery import beat -from celery import platforms -from celery.bin import beat as beat_bin -from celery.apps import beat as beatapp - -from celery.tests.case import AppCase, Mock, patch, restore_logging -from kombu.tests.case import redirect_stdouts - - -class MockedShelveModule(object): - shelves = defaultdict(lambda: {}) - - def open(self, filename, *args, **kwargs): - return self.shelves[filename] -mocked_shelve = MockedShelveModule() - - -class MockService(beat.Service): - started = False - in_sync = False - persistence = mocked_shelve - - def start(self): - self.__class__.started = True - - def sync(self): - self.__class__.in_sync = True - - -class MockBeat(beatapp.Beat): - running = False - - def run(self): - MockBeat.running = True - - -class MockBeat2(beatapp.Beat): - Service = MockService - - def install_sync_handler(self, b): - pass - - -class MockBeat3(beatapp.Beat): - Service = MockService - - def install_sync_handler(self, b): - raise TypeError('xxx') - - -class 
test_Beat(AppCase): - - def test_loglevel_string(self): - b = beatapp.Beat(app=self.app, loglevel='DEBUG', - redirect_stdouts=False) - self.assertEqual(b.loglevel, logging.DEBUG) - - b2 = beatapp.Beat(app=self.app, loglevel=logging.DEBUG, - redirect_stdouts=False) - self.assertEqual(b2.loglevel, logging.DEBUG) - - def test_colorize(self): - self.app.log.setup = Mock() - b = beatapp.Beat(app=self.app, no_color=True, - redirect_stdouts=False) - b.setup_logging() - self.assertTrue(self.app.log.setup.called) - self.assertEqual(self.app.log.setup.call_args[1]['colorize'], False) - - def test_init_loader(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.init_loader() - - def test_process_title(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.set_process_title() - - def test_run(self): - b = MockBeat2(app=self.app, redirect_stdouts=False) - MockService.started = False - b.run() - self.assertTrue(MockService.started) - - def psig(self, fun, *args, **kwargs): - handlers = {} - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): - handlers[sig] = handler - - p, platforms.signals = platforms.signals, Signals() - try: - fun(*args, **kwargs) - return handlers - finally: - platforms.signals = p - - def test_install_sync_handler(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - clock = MockService(app=self.app) - MockService.in_sync = False - handlers = self.psig(b.install_sync_handler, clock) - with self.assertRaises(SystemExit): - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(MockService.in_sync) - MockService.in_sync = False - - def test_setup_logging(self): - with restore_logging(): - try: - # py3k - delattr(sys.stdout, 'logger') - except AttributeError: - pass - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.redirect_stdouts = False - b.app.log.already_setup = False - b.setup_logging() - with self.assertRaises(AttributeError): - sys.stdout.logger - - @redirect_stdouts - @patch('celery.apps.beat.logger') - def test_logs_errors(self, logger, stdout, stderr): - with restore_logging(): - b = MockBeat3( - app=self.app, redirect_stdouts=False, socket_timeout=None, - ) - b.start_scheduler() - self.assertTrue(logger.critical.called) - - @redirect_stdouts - @patch('celery.platforms.create_pidlock') - def test_use_pidfile(self, create_pidlock, stdout, stderr): - b = MockBeat2(app=self.app, pidfile='pidfilelockfilepid', - socket_timeout=None, redirect_stdouts=False) - b.start_scheduler() - self.assertTrue(create_pidlock.called) - - -class MockDaemonContext(object): - opened = False - closed = False - - def __init__(self, *args, **kwargs): - pass - - def open(self): - self.__class__.opened = True - return self - __enter__ = open - - def close(self, *args): - self.__class__.closed = True - __exit__ = close - - -class test_div(AppCase): - - def setup(self): - self.prev, beatapp.Beat = beatapp.Beat, MockBeat - self.ctx, beat_bin.detached = ( - beat_bin.detached, MockDaemonContext, - ) - - def teardown(self): - beatapp.Beat = self.prev - - def test_main(self): - sys.argv = [sys.argv[0], '-s', 'foo'] - try: - beat_bin.main(app=self.app) - self.assertTrue(MockBeat.running) - finally: - MockBeat.running = False - - def test_detach(self): - cmd = beat_bin.beat() - cmd.app = self.app - cmd.run(detach=True) - self.assertTrue(MockDaemonContext.opened) - self.assertTrue(MockDaemonContext.closed) - - def test_parse_options(self): - cmd = beat_bin.beat() - cmd.app = self.app - options, args = cmd.parse_options('celery beat', 
['-s', 'foo']) - self.assertEqual(options.schedule, 'foo') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py deleted file mode 100644 index fbfdb62..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py +++ /dev/null @@ -1,588 +0,0 @@ -from __future__ import absolute_import - -import sys - -from anyjson import dumps -from datetime import datetime - -from celery import __main__ -from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK -from celery.bin.base import Error -from celery.bin.celery import ( - Command, - list_, - call, - purge, - result, - inspect, - control, - status, - migrate, - help, - report, - CeleryCommand, - determine_exit_status, - multi, - main as mainfun, - _RemoteControl, - command, -) - -from celery.tests.case import ( - AppCase, Mock, WhateverIO, override_stdouts, patch, -) - - -class test__main__(AppCase): - - def test_warn_deprecated(self): - with override_stdouts() as (stdout, _): - __main__._warn_deprecated('YADDA YADDA') - self.assertIn('command is deprecated', stdout.getvalue()) - self.assertIn('YADDA YADDA', stdout.getvalue()) - - def test_main(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.bin.celery.main') as main: - __main__.main() - mpc.assert_called_with() - main.assert_called_with() - - def test_compat_worker(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.worker.main') as main: - __main__._compat_worker() - mpc.assert_called_with() - depr.assert_called_with('celery worker') - main.assert_called_with() - - def test_compat_multi(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.multi.main') as main: - __main__._compat_multi() - self.assertFalse(mpc.called) - depr.assert_called_with('celery multi') - main.assert_called_with() - - def test_compat_beat(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.beat.main') as main: - __main__._compat_beat() - mpc.assert_called_with() - depr.assert_called_with('celery beat') - main.assert_called_with() - - -class test_Command(AppCase): - - def test_Error_repr(self): - x = Error('something happened') - self.assertIsNotNone(x.status) - self.assertTrue(x.reason) - self.assertTrue(str(x)) - - def setup(self): - self.out = WhateverIO() - self.err = WhateverIO() - self.cmd = Command(self.app, stdout=self.out, stderr=self.err) - - def test_error(self): - self.cmd.out = Mock() - self.cmd.error('FOO') - self.assertTrue(self.cmd.out.called) - - def test_out(self): - f = Mock() - self.cmd.out('foo', f) - - def test_call(self): - - def ok_run(): - pass - - self.cmd.run = ok_run - self.assertEqual(self.cmd(), EX_OK) - - def error_run(): - raise Error('error', EX_FAILURE) - self.cmd.run = error_run - self.assertEqual(self.cmd(), EX_FAILURE) - - def test_run_from_argv(self): - with self.assertRaises(NotImplementedError): - self.cmd.run_from_argv('prog', ['foo', 'bar']) - - def test_pretty_list(self): - self.assertEqual(self.cmd.pretty([])[1], '- empty -') - self.assertIn('bar', self.cmd.pretty(['foo', 'bar'])[1]) - - def test_pretty_dict(self): - self.assertIn( - 'OK', - str(self.cmd.pretty({'ok': 'the quick brown fox'})[0]), - ) - self.assertIn( - 
'ERROR', - str(self.cmd.pretty({'error': 'the quick brown fox'})[0]), - ) - - def test_pretty(self): - self.assertIn('OK', str(self.cmd.pretty('the quick brown'))) - self.assertIn('OK', str(self.cmd.pretty(object()))) - self.assertIn('OK', str(self.cmd.pretty({'foo': 'bar'}))) - - -class test_list(AppCase): - - def test_list_bindings_no_support(self): - l = list_(app=self.app, stderr=WhateverIO()) - management = Mock() - management.get_bindings.side_effect = NotImplementedError() - with self.assertRaises(Error): - l.list_bindings(management) - - def test_run(self): - l = list_(app=self.app, stderr=WhateverIO()) - l.run('bindings') - - with self.assertRaises(Error): - l.run(None) - - with self.assertRaises(Error): - l.run('foo') - - -class test_call(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @patch('celery.app.base.Celery.send_task') - def test_run(self, send_task): - a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO()) - a.run(self.add.name) - self.assertTrue(send_task.called) - - a.run(self.add.name, - args=dumps([4, 4]), - kwargs=dumps({'x': 2, 'y': 2})) - self.assertEqual(send_task.call_args[1]['args'], [4, 4]) - self.assertEqual(send_task.call_args[1]['kwargs'], {'x': 2, 'y': 2}) - - a.run(self.add.name, expires=10, countdown=10) - self.assertEqual(send_task.call_args[1]['expires'], 10) - self.assertEqual(send_task.call_args[1]['countdown'], 10) - - now = datetime.now() - iso = now.isoformat() - a.run(self.add.name, expires=iso) - self.assertEqual(send_task.call_args[1]['expires'], now) - with self.assertRaises(ValueError): - a.run(self.add.name, expires='foobaribazibar') - - -class test_purge(AppCase): - - @patch('celery.app.control.Control.purge') - def test_run(self, purge_): - out = WhateverIO() - a = purge(app=self.app, stdout=out) - purge_.return_value = 0 - a.run(force=True) - self.assertIn('No messages purged', out.getvalue()) - - purge_.return_value = 100 - a.run(force=True) - self.assertIn('100 messages', out.getvalue()) - - -class test_result(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - def test_run(self): - with patch('celery.result.AsyncResult.get') as get: - out = WhateverIO() - r = result(app=self.app, stdout=out) - get.return_value = 'Jerry' - r.run('id') - self.assertIn('Jerry', out.getvalue()) - - get.return_value = 'Elaine' - r.run('id', task=self.add.name) - self.assertIn('Elaine', out.getvalue()) - - with patch('celery.result.AsyncResult.traceback') as tb: - r.run('id', task=self.add.name, traceback=True) - self.assertIn(str(tb), out.getvalue()) - - -class test_status(AppCase): - - @patch('celery.bin.celery.inspect') - def test_run(self, inspect_): - out, err = WhateverIO(), WhateverIO() - ins = inspect_.return_value = Mock() - ins.run.return_value = [] - s = status(self.app, stdout=out, stderr=err) - with self.assertRaises(Error): - s.run() - - ins.run.return_value = ['a', 'b', 'c'] - s.run() - self.assertIn('3 nodes online', out.getvalue()) - s.run(quiet=True) - - -class test_migrate(AppCase): - - @patch('celery.contrib.migrate.migrate_tasks') - def test_run(self, migrate_tasks): - out = WhateverIO() - m = migrate(app=self.app, stdout=out, stderr=WhateverIO()) - with self.assertRaises(TypeError): - m.run() - self.assertFalse(migrate_tasks.called) - - m.run('memory://foo', 'memory://bar') - self.assertTrue(migrate_tasks.called) - - state = Mock() - state.count = 10 - state.strtotal = 30 - 
m.on_migrate_task(state, {'task': 'tasks.add', 'id': 'ID'}, None) - self.assertIn('10/30', out.getvalue()) - - -class test_report(AppCase): - - def test_run(self): - out = WhateverIO() - r = report(app=self.app, stdout=out) - self.assertEqual(r.run(), EX_OK) - self.assertTrue(out.getvalue()) - - -class test_help(AppCase): - - def test_run(self): - out = WhateverIO() - h = help(app=self.app, stdout=out) - h.parser = Mock() - self.assertEqual(h.run(), EX_USAGE) - self.assertTrue(out.getvalue()) - self.assertTrue(h.usage('help')) - h.parser.print_help.assert_called_with() - - -class test_CeleryCommand(AppCase): - - def test_execute_from_commandline(self): - x = CeleryCommand(app=self.app) - x.handle_argv = Mock() - x.handle_argv.return_value = 1 - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.handle_argv.return_value = True - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.handle_argv.side_effect = KeyboardInterrupt() - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.respects_app_option = True - with self.assertRaises(SystemExit): - x.execute_from_commandline(['celery', 'multi']) - self.assertFalse(x.respects_app_option) - x.respects_app_option = True - with self.assertRaises(SystemExit): - x.execute_from_commandline(['manage.py', 'celery', 'multi']) - self.assertFalse(x.respects_app_option) - - def test_with_pool_option(self): - x = CeleryCommand(app=self.app) - self.assertIsNone(x.with_pool_option(['celery', 'events'])) - self.assertTrue(x.with_pool_option(['celery', 'worker'])) - self.assertTrue(x.with_pool_option(['manage.py', 'celery', 'worker'])) - - def test_load_extensions_no_commands(self): - with patch('celery.bin.celery.Extensions') as Ext: - ext = Ext.return_value = Mock(name='Extension') - ext.load.return_value = None - x = CeleryCommand(app=self.app) - x.load_extension_commands() - - def test_determine_exit_status(self): - self.assertEqual(determine_exit_status('true'), EX_OK) - self.assertEqual(determine_exit_status(''), EX_FAILURE) - - def test_relocate_args_from_start(self): - x = CeleryCommand(app=self.app) - self.assertEqual(x._relocate_args_from_start(None), []) - self.assertEqual( - x._relocate_args_from_start( - ['-l', 'debug', 'worker', '-c', '3', '--foo'], - ), - ['worker', '-c', '3', '--foo', '-l', 'debug'], - ) - self.assertEqual( - x._relocate_args_from_start( - ['--pool=gevent', '-l', 'debug', 'worker', '--foo', '-c', '3'], - ), - ['worker', '--foo', '-c', '3', '--pool=gevent', '-l', 'debug'], - ) - self.assertEqual( - x._relocate_args_from_start(['foo', '--foo=1']), - ['foo', '--foo=1'], - ) - - def test_handle_argv(self): - x = CeleryCommand(app=self.app) - x.execute = Mock() - x.handle_argv('celery', []) - x.execute.assert_called_with('help', ['help']) - - x.handle_argv('celery', ['start', 'foo']) - x.execute.assert_called_with('start', ['start', 'foo']) - - def test_execute(self): - x = CeleryCommand(app=self.app) - Help = x.commands['help'] = Mock() - help = Help.return_value = Mock() - x.execute('fooox', ['a']) - help.run_from_argv.assert_called_with(x.prog_name, [], command='help') - help.reset() - x.execute('help', ['help']) - help.run_from_argv.assert_called_with(x.prog_name, [], command='help') - - Dummy = x.commands['dummy'] = Mock() - dummy = Dummy.return_value = Mock() - exc = dummy.run_from_argv.side_effect = Error( - 'foo', status='EX_FAILURE', - ) - x.on_error = Mock(name='on_error') - help.reset() - x.execute('dummy', ['dummy']) - x.on_error.assert_called_with(exc) - 
dummy.run_from_argv.assert_called_with( - x.prog_name, [], command='dummy', - ) - help.run_from_argv.assert_called_with( - x.prog_name, [], command='help', - ) - - exc = dummy.run_from_argv.side_effect = x.UsageError('foo') - x.on_usage_error = Mock() - x.execute('dummy', ['dummy']) - x.on_usage_error.assert_called_with(exc) - - def test_on_usage_error(self): - x = CeleryCommand(app=self.app) - x.error = Mock() - x.on_usage_error(x.UsageError('foo'), command=None) - self.assertTrue(x.error.called) - x.on_usage_error(x.UsageError('foo'), command='dummy') - - def test_prepare_prog_name(self): - x = CeleryCommand(app=self.app) - main = Mock(name='__main__') - main.__file__ = '/opt/foo.py' - with patch.dict(sys.modules, __main__=main): - self.assertEqual(x.prepare_prog_name('__main__.py'), '/opt/foo.py') - self.assertEqual(x.prepare_prog_name('celery'), 'celery') - - -class test_RemoteControl(AppCase): - - def test_call_interface(self): - with self.assertRaises(NotImplementedError): - _RemoteControl(app=self.app).call() - - -class test_inspect(AppCase): - - def test_usage(self): - self.assertTrue(inspect(app=self.app).usage('foo')) - - def test_command_info(self): - i = inspect(app=self.app) - self.assertTrue(i.get_command_info( - 'ping', help=True, color=i.colored.red, - )) - - def test_list_commands_color(self): - i = inspect(app=self.app) - self.assertTrue(i.list_commands( - help=True, color=i.colored.red, - )) - self.assertTrue(i.list_commands( - help=False, color=None, - )) - - def test_epilog(self): - self.assertTrue(inspect(app=self.app).epilog) - - def test_do_call_method_sql_transport_type(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock(name='Connection') - conn.transport.driver_type = 'sql' - i = inspect(app=self.app) - with self.assertRaises(i.Error): - i.do_call_method(['ping']) - - def test_say_directions(self): - i = inspect(self.app) - i.out = Mock() - i.quiet = True - i.say_chat('<-', 'hello out') - self.assertFalse(i.out.called) - - i.say_chat('->', 'hello in') - self.assertTrue(i.out.called) - - i.quiet = False - i.out.reset_mock() - i.say_chat('<-', 'hello out', 'body') - self.assertTrue(i.out.called) - - @patch('celery.app.control.Control.inspect') - def test_run(self, real): - out = WhateverIO() - i = inspect(app=self.app, stdout=out) - with self.assertRaises(Error): - i.run() - with self.assertRaises(Error): - i.run('help') - with self.assertRaises(Error): - i.run('xyzzybaz') - - i.run('ping') - self.assertTrue(real.called) - i.run('ping', destination='foo,bar') - self.assertEqual(real.call_args[1]['destination'], ['foo', 'bar']) - self.assertEqual(real.call_args[1]['timeout'], 0.2) - callback = real.call_args[1]['callback'] - - callback({'foo': {'ok': 'pong'}}) - self.assertIn('OK', out.getvalue()) - - instance = real.return_value = Mock() - instance.ping.return_value = None - with self.assertRaises(Error): - i.run('ping') - - out.seek(0) - out.truncate() - i.quiet = True - i.say_chat('<-', 'hello') - self.assertFalse(out.getvalue()) - - -class test_control(AppCase): - - def control(self, patch_call, *args, **kwargs): - kwargs.setdefault('app', Mock(name='app')) - c = control(*args, **kwargs) - if patch_call: - c.call = Mock(name='control.call') - return c - - def test_call(self): - i = self.control(False) - i.call('foo', 1, kw=2) - i.app.control.foo.assert_called_with(1, kw=2, reply=True) - - def test_pool_grow(self): - i = self.control(True) - i.pool_grow('pool_grow', n=2) - i.call.assert_called_with('pool_grow', 2) - - def 
test_pool_shrink(self): - i = self.control(True) - i.pool_shrink('pool_shrink', n=2) - i.call.assert_called_with('pool_shrink', 2) - - def test_autoscale(self): - i = self.control(True) - i.autoscale('autoscale', max=3, min=2) - i.call.assert_called_with('autoscale', 3, 2) - - def test_rate_limit(self): - i = self.control(True) - i.rate_limit('rate_limit', 'proj.add', '1/s') - i.call.assert_called_with('rate_limit', 'proj.add', '1/s') - - def test_time_limit(self): - i = self.control(True) - i.time_limit('time_limit', 'proj.add', 10, 30) - i.call.assert_called_with('time_limit', 'proj.add', 10, 30) - - def test_add_consumer(self): - i = self.control(True) - i.add_consumer( - 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', - durable=True, - ) - i.call.assert_called_with( - 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', - durable=True, - ) - - def test_cancel_consumer(self): - i = self.control(True) - i.cancel_consumer('cancel_consumer', 'queue') - i.call.assert_called_with('cancel_consumer', 'queue') - - -class test_multi(AppCase): - - def test_get_options(self): - self.assertTupleEqual(multi(app=self.app).get_options(), ()) - - def test_run_from_argv(self): - with patch('celery.bin.multi.MultiTool') as MultiTool: - m = MultiTool.return_value = Mock() - multi(self.app).run_from_argv('celery', ['arg'], command='multi') - m.execute_from_commandline.assert_called_with( - ['multi', 'arg'], 'celery', - ) - - -class test_main(AppCase): - - @patch('celery.bin.celery.CeleryCommand') - def test_main(self, Command): - cmd = Command.return_value = Mock() - mainfun() - cmd.execute_from_commandline.assert_called_with(None) - - @patch('celery.bin.celery.CeleryCommand') - def test_main_KeyboardInterrupt(self, Command): - cmd = Command.return_value = Mock() - cmd.execute_from_commandline.side_effect = KeyboardInterrupt() - mainfun() - cmd.execute_from_commandline.assert_called_with(None) - - -class test_compat(AppCase): - - def test_compat_command_decorator(self): - with patch('celery.bin.celery.CeleryCommand') as CC: - self.assertEqual(command(), CC.register_command) - fun = Mock(name='fun') - command(fun) - CC.register_command.assert_called_with(fun) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py deleted file mode 100644 index 0fa3934..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import absolute_import - -from celery.platforms import IS_WINDOWS -from celery.bin.celeryd_detach import ( - detach, - detached_celeryd, - main, -) - -from celery.tests.case import AppCase, Mock, override_stdouts, patch - - -if not IS_WINDOWS: - class test_detached(AppCase): - - @patch('celery.bin.celeryd_detach.detached') - @patch('os.execv') - @patch('celery.bin.celeryd_detach.logger') - @patch('celery.app.log.Logging.setup_logging_subsystem') - def test_execs(self, setup_logs, logger, execv, detached): - context = detached.return_value = Mock() - context.__enter__ = Mock() - context.__exit__ = Mock() - - detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log', - pidfile='/var/pid', hostname='foo@example.com') - detached.assert_called_with( - '/var/log', '/var/pid', None, None, None, None, False, - after_forkers=False, - ) - execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c']) - - execv.side_effect = Exception('foo') - r = detach( - '/bin/boo', ['a', 'b', 'c'], - logfile='/var/log', 
pidfile='/var/pid', - hostname='foo@example.com', app=self.app) - context.__enter__.assert_called_with() - self.assertTrue(logger.critical.called) - setup_logs.assert_called_with( - 'ERROR', '/var/log', hostname='foo@example.com') - self.assertEqual(r, 1) - - -class test_PartialOptionParser(AppCase): - - def test_parser(self): - x = detached_celeryd(self.app) - p = x.Parser('celeryd_detach') - options, values = p.parse_args(['--logfile=foo', '--fake', '--enable', - 'a', 'b', '-c1', '-d', '2']) - self.assertEqual(options.logfile, 'foo') - self.assertEqual(values, ['a', 'b']) - self.assertEqual(p.leftovers, ['--enable', '-c1', '-d', '2']) - - with override_stdouts(): - with self.assertRaises(SystemExit): - p.parse_args(['--logfile']) - p.get_option('--logfile').nargs = 2 - with self.assertRaises(SystemExit): - p.parse_args(['--logfile=a']) - with self.assertRaises(SystemExit): - p.parse_args(['--fake=abc']) - - assert p.get_option('--logfile').nargs == 2 - p.parse_args(['--logfile=a', 'b']) - p.get_option('--logfile').nargs = 1 - - -class test_Command(AppCase): - argv = ['--autoscale=10,2', '-c', '1', - '--logfile=/var/log', '-lDEBUG', - '--', '.disable_rate_limits=1'] - - def test_parse_options(self): - x = detached_celeryd(app=self.app) - o, v, l = x.parse_options('cd', self.argv) - self.assertEqual(o.logfile, '/var/log') - self.assertEqual(l, ['--autoscale=10,2', '-c', '1', - '-lDEBUG', '--logfile=/var/log', - '--pidfile=celeryd.pid']) - x.parse_options('cd', []) # no args - - @patch('sys.exit') - @patch('celery.bin.celeryd_detach.detach') - def test_execute_from_commandline(self, detach, exit): - x = detached_celeryd(app=self.app) - x.execute_from_commandline(self.argv) - self.assertTrue(exit.called) - detach.assert_called_with( - path=x.execv_path, uid=None, gid=None, - umask=None, fake=False, logfile='/var/log', pidfile='celeryd.pid', - working_directory=None, executable=None, hostname=None, - argv=x.execv_argv + [ - '-c', '1', '-lDEBUG', - '--logfile=/var/log', '--pidfile=celeryd.pid', - '--', '.disable_rate_limits=1' - ], - app=self.app, - ) - - @patch('celery.bin.celeryd_detach.detached_celeryd') - def test_main(self, command): - c = command.return_value = Mock() - main(self.app) - c.execute_from_commandline.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py deleted file mode 100644 index 09cdc4d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import - -from time import time - -from celery.events.dumper import ( - humanize_type, - Dumper, - evdump, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, patch - - -class test_Dumper(AppCase): - - def setup(self): - self.out = WhateverIO() - self.dumper = Dumper(out=self.out) - - def test_humanize_type(self): - self.assertEqual(humanize_type('worker-offline'), 'shutdown') - self.assertEqual(humanize_type('task-started'), 'task started') - - def test_format_task_event(self): - self.dumper.format_task_event( - 'worker@example.com', time(), 'task-started', 'tasks.add', {}) - self.assertTrue(self.out.getvalue()) - - def test_on_event(self): - event = { - 'hostname': 'worker@example.com', - 'timestamp': time(), - 'uuid': '1ef', - 'name': 'tasks.add', - 'args': '(2, 2)', - 'kwargs': '{}', - } - self.dumper.on_event(dict(event, type='task-received')) - self.assertTrue(self.out.getvalue()) - 
self.dumper.on_event(dict(event, type='task-revoked')) - self.dumper.on_event(dict(event, type='worker-online')) - - @patch('celery.events.EventReceiver.capture') - def test_evdump(self, capture): - capture.side_effect = KeyboardInterrupt() - evdump(app=self.app) - - def test_evdump_error_handler(self): - app = Mock(name='app') - with patch('celery.events.dumper.Dumper') as Dumper: - Dumper.return_value = Mock(name='dumper') - recv = app.events.Receiver.return_value = Mock() - - def se(*_a, **_k): - recv.capture.side_effect = SystemExit() - raise KeyError() - recv.capture.side_effect = se - - Conn = app.connection.return_value = Mock(name='conn') - conn = Conn.clone.return_value = Mock(name='cloned_conn') - conn.connection_errors = (KeyError, ) - conn.channel_errors = () - - evdump(app) - self.assertTrue(conn.ensure_connection.called) - errback = conn.ensure_connection.call_args[0][0] - errback(KeyError(), 1) - self.assertTrue(conn.as_uri.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py deleted file mode 100644 index a6e79f7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -from celery.bin import events - -from celery.tests.case import AppCase, SkipTest, patch, _old_patch - - -class MockCommand(object): - executed = [] - - def execute_from_commandline(self, **kwargs): - self.executed.append(True) - - -def proctitle(prog, info=None): - proctitle.last = (prog, info) -proctitle.last = () - - -class test_events(AppCase): - - def setup(self): - self.ev = events.events(app=self.app) - - @_old_patch('celery.events.dumper', 'evdump', - lambda **kw: 'me dumper, you?') - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def test_run_dump(self): - self.assertEqual(self.ev.run(dump=True), 'me dumper, you?') - self.assertIn('celery events:dump', proctitle.last[0]) - - def test_run_top(self): - try: - import curses # noqa - except ImportError: - raise SkipTest('curses monitor requires curses') - - @_old_patch('celery.events.cursesmon', 'evtop', - lambda **kw: 'me top, you?') - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def _inner(): - self.assertEqual(self.ev.run(), 'me top, you?') - self.assertIn('celery events:top', proctitle.last[0]) - return _inner() - - @_old_patch('celery.events.snapshot', 'evcam', - lambda *a, **k: (a, k)) - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def test_run_cam(self): - a, kw = self.ev.run(camera='foo.bar.baz', logfile='logfile') - self.assertEqual(a[0], 'foo.bar.baz') - self.assertEqual(kw['freq'], 1.0) - self.assertIsNone(kw['maxrate']) - self.assertEqual(kw['loglevel'], 'INFO') - self.assertEqual(kw['logfile'], 'logfile') - self.assertIn('celery events:cam', proctitle.last[0]) - - @patch('celery.events.snapshot.evcam') - @patch('celery.bin.events.detached') - def test_run_cam_detached(self, detached, evcam): - self.ev.prog_name = 'celery events' - self.ev.run_evcam('myapp.Camera', detach=True) - self.assertTrue(detached.called) - self.assertTrue(evcam.called) - - def test_get_options(self): - self.assertTrue(self.ev.get_options()) - - @_old_patch('celery.bin.events', 'events', MockCommand) - def test_main(self): - MockCommand.executed = [] - events.main() - self.assertTrue(MockCommand.executed) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py deleted file mode 100644 index ee77a45..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py +++ /dev/null @@ -1,474 +0,0 @@ -from __future__ import absolute_import - -import errno -import signal -import sys - -from celery.bin.multi import ( - main, - MultiTool, - findsig, - abbreviations, - parse_ns_range, - format_opt, - quote, - NamespacedOptionParser, - multi_args, - __doc__ as doc, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, SkipTest, patch - - -class test_functions(AppCase): - - def test_findsig(self): - self.assertEqual(findsig(['a', 'b', 'c', '-1']), 1) - self.assertEqual(findsig(['--foo=1', '-9']), 9) - self.assertEqual(findsig(['-INT']), signal.SIGINT) - self.assertEqual(findsig([]), signal.SIGTERM) - self.assertEqual(findsig(['-s']), signal.SIGTERM) - self.assertEqual(findsig(['-log']), signal.SIGTERM) - - def test_abbreviations(self): - expander = abbreviations({'%s': 'START', - '%x': 'STOP'}) - self.assertEqual(expander('foo%s'), 'fooSTART') - self.assertEqual(expander('foo%x'), 'fooSTOP') - self.assertEqual(expander('foo%y'), 'foo%y') - self.assertIsNone(expander(None)) - - def test_parse_ns_range(self): - self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3']) - self.assertEqual(parse_ns_range('1-3', False), ['1-3']) - self.assertEqual(parse_ns_range( - '1-3,10,11,20', True), - ['1', '2', '3', '10', '11', '20'], - ) - - def test_format_opt(self): - self.assertEqual(format_opt('--foo', None), '--foo') - self.assertEqual(format_opt('-c', 1), '-c 1') - self.assertEqual(format_opt('--log', 'foo'), '--log=foo') - - def test_quote(self): - self.assertEqual(quote("the 'quick"), "'the '\\''quick'") - - -class test_NamespacedOptionParser(AppCase): - - def test_parse(self): - x = NamespacedOptionParser(['-c:1,3', '4']) - self.assertEqual(x.namespaces.get('1,3'), {'-c': '4'}) - x = NamespacedOptionParser(['-c:jerry,elaine', '5', - '--loglevel:kramer=DEBUG', - '--flag', - '--logfile=foo', '-Q', 'bar', 'a', 'b', - '--', '.disable_rate_limits=1']) - self.assertEqual(x.options, {'--logfile': 'foo', - '-Q': 'bar', - '--flag': None}) - self.assertEqual(x.values, ['a', 'b']) - self.assertEqual(x.namespaces.get('jerry,elaine'), {'-c': '5'}) - self.assertEqual(x.namespaces.get('kramer'), {'--loglevel': 'DEBUG'}) - self.assertEqual(x.passthrough, '-- .disable_rate_limits=1') - - -class test_multi_args(AppCase): - - @patch('socket.gethostname') - def test_parse(self, gethostname): - p = NamespacedOptionParser([ - '-c:jerry,elaine', '5', - '--loglevel:kramer=DEBUG', - '--flag', - '--logfile=foo', '-Q', 'bar', 'jerry', - 'elaine', 'kramer', - '--', '.disable_rate_limits=1', - ]) - it = multi_args(p, cmd='COMMAND', append='*AP*', - prefix='*P*', suffix='*S*') - names = list(it) - - def assert_line_in(name, args): - self.assertIn(name, [tup[0] for tup in names]) - argv = None - for item in names: - if item[0] == name: - argv = item[1] - self.assertTrue(argv) - for arg in args: - self.assertIn(arg, argv) - - assert_line_in( - '*P*jerry@*S*', - ['COMMAND', '-n *P*jerry@*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*'], - ) - assert_line_in( - '*P*elaine@*S*', - ['COMMAND', '-n *P*elaine@*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*'], - ) - assert_line_in( - '*P*kramer@*S*', - ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*', - '-Q bar', '--flag', '--logfile=foo', - '-- 
.disable_rate_limits=1', '*AP*'], - ) - expand = names[0][2] - self.assertEqual(expand('%h'), '*P*jerry@*S*') - self.assertEqual(expand('%n'), 'jerry') - names2 = list(multi_args(p, cmd='COMMAND', append='', - prefix='*P*', suffix='*S*')) - self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1') - - gethostname.return_value = 'example.com' - p2 = NamespacedOptionParser(['10', '-c:1', '5']) - names3 = list(multi_args(p2, cmd='COMMAND')) - self.assertEqual(len(names3), 10) - self.assertEqual( - names3[0][0:2], - ('celery1@example.com', - ['COMMAND', '-n celery1@example.com', '-c 5', '']), - ) - for i, worker in enumerate(names3[1:]): - self.assertEqual( - worker[0:2], - ('celery%s@example.com' % (i + 2), - ['COMMAND', '-n celery%s@example.com' % (i + 2), '']), - ) - - names4 = list(multi_args(p2, cmd='COMMAND', suffix='""')) - self.assertEqual(len(names4), 10) - self.assertEqual( - names4[0][0:2], - ('celery1@', - ['COMMAND', '-n celery1@', '-c 5', '']), - ) - - p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) - names5 = list(multi_args(p3, cmd='COMMAND', suffix='""')) - self.assertEqual( - names5[0][0:2], - ('foo@', - ['COMMAND', '-n foo@', '-c 5', '']), - ) - - -class test_MultiTool(AppCase): - - def setup(self): - self.fh = WhateverIO() - self.env = {} - self.t = MultiTool(env=self.env, fh=self.fh) - - def test_note(self): - self.t.note('hello world') - self.assertEqual(self.fh.getvalue(), 'hello world\n') - - def test_note_quiet(self): - self.t.quiet = True - self.t.note('hello world') - self.assertFalse(self.fh.getvalue()) - - def test_info(self): - self.t.verbose = True - self.t.info('hello info') - self.assertEqual(self.fh.getvalue(), 'hello info\n') - - def test_info_not_verbose(self): - self.t.verbose = False - self.t.info('hello info') - self.assertFalse(self.fh.getvalue()) - - def test_error(self): - self.t.carp = Mock() - self.t.usage = Mock() - self.assertEqual(self.t.error('foo'), 1) - self.t.carp.assert_called_with('foo') - self.t.usage.assert_called_with() - - self.t.carp = Mock() - self.assertEqual(self.t.error(), 1) - self.assertFalse(self.t.carp.called) - - self.assertEqual(self.t.retcode, 1) - - @patch('celery.bin.multi.Popen') - def test_waitexec(self, Popen): - self.t.note = Mock() - pipe = Popen.return_value = Mock() - pipe.wait.return_value = -10 - self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 10) - Popen.assert_called_with(['path', '-m', 'foo'], env=self.t.env) - self.t.note.assert_called_with('* Child was terminated by signal 10') - - pipe.wait.return_value = 2 - self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 2) - self.t.note.assert_called_with( - '* Child terminated with errorcode 2', - ) - - pipe.wait.return_value = 0 - self.assertFalse(self.t.waitexec(['-m', 'foo', 'path'])) - - def test_nosplash(self): - self.t.nosplash = True - self.t.splash() - self.assertFalse(self.fh.getvalue()) - - def test_splash(self): - self.t.nosplash = False - self.t.splash() - self.assertIn('celery multi', self.fh.getvalue()) - - def test_usage(self): - self.t.usage() - self.assertTrue(self.fh.getvalue()) - - def test_help(self): - self.t.help([]) - self.assertIn(doc, self.fh.getvalue()) - - def test_expand(self): - self.t.expand(['foo%n', 'ask', 'klask', 'dask']) - self.assertEqual( - self.fh.getvalue(), 'fooask\nfooklask\nfoodask\n', - ) - - def test_restart(self): - stop = self.t._stop_nodes = Mock() - self.t.restart(['jerry', 'george'], 'celery worker') - waitexec = self.t.waitexec = Mock() - self.assertTrue(stop.called) - callback = 
stop.call_args[1]['callback'] - self.assertTrue(callback) - - waitexec.return_value = 0 - callback('jerry', ['arg'], 13) - waitexec.assert_called_with(['arg'], path=sys.executable) - self.assertIn('OK', self.fh.getvalue()) - self.fh.seek(0) - self.fh.truncate() - - waitexec.return_value = 1 - callback('jerry', ['arg'], 13) - self.assertIn('FAILED', self.fh.getvalue()) - - def test_stop(self): - self.t.getpids = Mock() - self.t.getpids.return_value = [2, 3, 4] - self.t.shutdown_nodes = Mock() - self.t.stop(['a', 'b', '-INT'], 'celery worker') - self.t.shutdown_nodes.assert_called_with( - [2, 3, 4], sig=signal.SIGINT, retry=None, callback=None, - - ) - - def test_kill(self): - if not hasattr(signal, 'SIGKILL'): - raise SkipTest('SIGKILL not supported by this platform') - self.t.getpids = Mock() - self.t.getpids.return_value = [ - ('a', None, 10), - ('b', None, 11), - ('c', None, 12) - ] - sig = self.t.signal_node = Mock() - - self.t.kill(['a', 'b', 'c'], 'celery worker') - - sigs = sig.call_args_list - self.assertEqual(len(sigs), 3) - self.assertEqual(sigs[0][0], ('a', 10, signal.SIGKILL)) - self.assertEqual(sigs[1][0], ('b', 11, signal.SIGKILL)) - self.assertEqual(sigs[2][0], ('c', 12, signal.SIGKILL)) - - def prepare_pidfile_for_getpids(self, Pidfile): - class pids(object): - - def __init__(self, path): - self.path = path - - def read_pid(self): - try: - return {'foo.pid': 10, - 'bar.pid': 11}[self.path] - except KeyError: - raise ValueError() - Pidfile.side_effect = pids - - @patch('celery.bin.multi.Pidfile') - @patch('socket.gethostname') - def test_getpids(self, gethostname, Pidfile): - gethostname.return_value = 'e.com' - self.prepare_pidfile_for_getpids(Pidfile) - callback = Mock() - - p = NamespacedOptionParser(['foo', 'bar', 'baz']) - nodes = self.t.getpids(p, 'celery worker', callback=callback) - node_0, node_1 = nodes - self.assertEqual(node_0[0], 'foo@e.com') - self.assertEqual( - sorted(node_0[1]), - sorted(('celery worker', '--pidfile=foo.pid', - '-n foo@e.com', '')), - ) - self.assertEqual(node_0[2], 10) - - self.assertEqual(node_1[0], 'bar@e.com') - self.assertEqual( - sorted(node_1[1]), - sorted(('celery worker', '--pidfile=bar.pid', - '-n bar@e.com', '')), - ) - self.assertEqual(node_1[2], 11) - self.assertTrue(callback.called) - cargs, _ = callback.call_args - self.assertEqual(cargs[0], 'baz@e.com') - self.assertItemsEqual( - cargs[1], - ['celery worker', '--pidfile=baz.pid', '-n baz@e.com', ''], - ) - self.assertIsNone(cargs[2]) - self.assertIn('DOWN', self.fh.getvalue()) - - # without callback, should work - nodes = self.t.getpids(p, 'celery worker', callback=None) - - @patch('celery.bin.multi.Pidfile') - @patch('socket.gethostname') - @patch('celery.bin.multi.sleep') - def test_shutdown_nodes(self, sleep, gethostname, Pidfile): - gethostname.return_value = 'e.com' - self.prepare_pidfile_for_getpids(Pidfile) - self.assertIsNone(self.t.shutdown_nodes([])) - self.t.signal_node = Mock() - node_alive = self.t.node_alive = Mock() - self.t.node_alive.return_value = False - - callback = Mock() - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=callback) - sigs = sorted(self.t.signal_node.call_args_list) - self.assertEqual(len(sigs), 2) - self.assertIn( - ('foo@e.com', 10, signal.SIGTERM), - [tup[0] for tup in sigs], - ) - self.assertIn( - ('bar@e.com', 11, signal.SIGTERM), - [tup[0] for tup in sigs], - ) - self.t.signal_node.return_value = False - self.assertTrue(callback.called) - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=None) - - def
on_node_alive(pid): - if node_alive.call_count > 4: - return True - return False - self.t.signal_node.return_value = True - self.t.node_alive.side_effect = on_node_alive - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', retry=True) - - @patch('os.kill') - def test_node_alive(self, kill): - kill.return_value = True - self.assertTrue(self.t.node_alive(13)) - esrch = OSError() - esrch.errno = errno.ESRCH - kill.side_effect = esrch - self.assertFalse(self.t.node_alive(13)) - kill.assert_called_with(13, 0) - - enoent = OSError() - enoent.errno = errno.ENOENT - kill.side_effect = enoent - with self.assertRaises(OSError): - self.t.node_alive(13) - - @patch('os.kill') - def test_signal_node(self, kill): - kill.return_value = True - self.assertTrue(self.t.signal_node('foo', 13, 9)) - esrch = OSError() - esrch.errno = errno.ESRCH - kill.side_effect = esrch - self.assertFalse(self.t.signal_node('foo', 13, 9)) - kill.assert_called_with(13, 9) - self.assertIn('Could not signal foo', self.fh.getvalue()) - - enoent = OSError() - enoent.errno = errno.ENOENT - kill.side_effect = enoent - with self.assertRaises(OSError): - self.t.signal_node('foo', 13, 9) - - def test_start(self): - self.t.waitexec = Mock() - self.t.waitexec.return_value = 0 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) - - self.t.waitexec.return_value = 1 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) - - def test_show(self): - self.t.show(['foo', 'bar', 'baz'], 'celery worker') - self.assertTrue(self.fh.getvalue()) - - @patch('socket.gethostname') - def test_get(self, gethostname): - gethostname.return_value = 'e.com' - self.t.get(['xuzzy@e.com', 'foo', 'bar', 'baz'], 'celery worker') - self.assertFalse(self.fh.getvalue()) - self.t.get(['foo@e.com', 'foo', 'bar', 'baz'], 'celery worker') - self.assertTrue(self.fh.getvalue()) - - @patch('socket.gethostname') - def test_names(self, gethostname): - gethostname.return_value = 'e.com' - self.t.names(['foo', 'bar', 'baz'], 'celery worker') - self.assertIn('foo@e.com\nbar@e.com\nbaz@e.com', self.fh.getvalue()) - - def test_execute_from_commandline(self): - start = self.t.commands['start'] = Mock() - self.t.error = Mock() - self.t.execute_from_commandline(['multi', 'start', 'foo', 'bar']) - self.assertFalse(self.t.error.called) - start.assert_called_with(['foo', 'bar'], 'celery worker') - - self.t.error = Mock() - self.t.execute_from_commandline(['multi', 'frob', 'foo', 'bar']) - self.t.error.assert_called_with('Invalid command: frob') - - self.t.error = Mock() - self.t.execute_from_commandline(['multi']) - self.t.error.assert_called_with() - - self.t.error = Mock() - self.t.execute_from_commandline(['multi', '-foo']) - self.t.error.assert_called_with() - - self.t.execute_from_commandline( - ['multi', 'start', 'foo', - '--nosplash', '--quiet', '-q', '--verbose', '--no-color'], - ) - self.assertTrue(self.t.nosplash) - self.assertTrue(self.t.quiet) - self.assertTrue(self.t.verbose) - self.assertTrue(self.t.no_color) - - def test_stopwait(self): - self.t._stop_nodes = Mock() - self.t.stopwait(['foo', 'bar', 'baz'], 'celery worker') - self.assertEqual(self.t._stop_nodes.call_args[1]['retry'], 2) - - @patch('celery.bin.multi.MultiTool') - def test_main(self, MultiTool): - m = MultiTool.return_value = Mock() - with self.assertRaises(SystemExit): - main() - m.execute_from_commandline.assert_called_with(sys.argv) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py deleted file mode 100644 index bc63940..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py +++ /dev/null @@ -1,681 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import sys - -from billiard import current_process -from kombu import Exchange, Queue - -from celery import platforms -from celery import signals -from celery.app import trace -from celery.apps import worker as cd -from celery.bin.worker import worker, main as worker_main -from celery.exceptions import ( - ImproperlyConfigured, WorkerShutdown, WorkerTerminate, -) -from celery.utils.log import ensure_process_aware_logger -from celery.worker import state - -from celery.tests.case import ( - AppCase, - Mock, - SkipTest, - disable_stdouts, - patch, - skip_if_pypy, - skip_if_jython, -) - -ensure_process_aware_logger() - - -class WorkerAppCase(AppCase): - - def tearDown(self): - super(WorkerAppCase, self).tearDown() - trace.reset_worker_optimizations() - - -class Worker(cd.Worker): - redirect_stdouts = False - - def start(self, *args, **kwargs): - self.on_start() - - -class test_Worker(WorkerAppCase): - Worker = Worker - - @disable_stdouts - def test_queues_string(self): - w = self.app.Worker() - w.setup_queues('foo,bar,baz') - self.assertTrue('foo' in self.app.amqp.queues) - - @disable_stdouts - def test_cpu_count(self): - with patch('celery.worker.cpu_count') as cpu_count: - cpu_count.side_effect = NotImplementedError() - w = self.app.Worker(concurrency=None) - self.assertEqual(w.concurrency, 2) - w = self.app.Worker(concurrency=5) - self.assertEqual(w.concurrency, 5) - - @disable_stdouts - def test_windows_B_option(self): - self.app.IS_WINDOWS = True - with self.assertRaises(SystemExit): - worker(app=self.app).run(beat=True) - - def test_setup_concurrency_very_early(self): - x = worker() - x.run = Mock() - with self.assertRaises(ImportError): - x.execute_from_commandline(['worker', '-P', 'xyzybox']) - - def test_run_from_argv_basic(self): - x = worker(app=self.app) - x.run = Mock() - x.maybe_detach = Mock() - - def run(*args, **kwargs): - pass - x.run = run - x.run_from_argv('celery', []) - self.assertTrue(x.maybe_detach.called) - - def test_maybe_detach(self): - x = worker(app=self.app) - with patch('celery.bin.worker.detached_celeryd') as detached: - x.maybe_detach([]) - self.assertFalse(detached.called) - with self.assertRaises(SystemExit): - x.maybe_detach(['--detach']) - self.assertTrue(detached.called) - - @disable_stdouts - def test_invalid_loglevel_gives_error(self): - x = worker(app=self.app) - with self.assertRaises(SystemExit): - x.run(loglevel='GRIM_REAPER') - - def test_no_loglevel(self): - self.app.Worker = Mock() - worker(app=self.app).run(loglevel=None) - - def test_tasklist(self): - worker = self.app.Worker() - self.assertTrue(worker.app.tasks) - self.assertTrue(worker.app.finalized) - self.assertTrue(worker.tasklist(include_builtins=True)) - worker.tasklist(include_builtins=False) - - def test_extra_info(self): - worker = self.app.Worker() - worker.loglevel = logging.WARNING - self.assertFalse(worker.extra_info()) - worker.loglevel = logging.INFO - self.assertTrue(worker.extra_info()) - - @disable_stdouts - def test_loglevel_string(self): - worker = self.Worker(app=self.app, loglevel='INFO') - self.assertEqual(worker.loglevel, logging.INFO) - - @disable_stdouts - def test_run_worker(self): - handlers = {} - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): 
- handlers[sig] = handler - - p = platforms.signals - platforms.signals = Signals() - try: - w = self.Worker(app=self.app) - w._isatty = False - w.on_start() - for sig in 'SIGINT', 'SIGHUP', 'SIGTERM': - self.assertIn(sig, handlers) - - handlers.clear() - w = self.Worker(app=self.app) - w._isatty = True - w.on_start() - for sig in 'SIGINT', 'SIGTERM': - self.assertIn(sig, handlers) - self.assertNotIn('SIGHUP', handlers) - finally: - platforms.signals = p - - @disable_stdouts - def test_startup_info(self): - worker = self.Worker(app=self.app) - worker.on_start() - self.assertTrue(worker.startup_info()) - worker.loglevel = logging.DEBUG - self.assertTrue(worker.startup_info()) - worker.loglevel = logging.INFO - self.assertTrue(worker.startup_info()) - worker.autoscale = 13, 10 - self.assertTrue(worker.startup_info()) - - prev_loader = self.app.loader - worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi') - self.app.loader = Mock() - self.app.loader.__module__ = 'acme.baked_beans' - self.assertTrue(worker.startup_info()) - - self.app.loader = Mock() - self.app.loader.__module__ = 'celery.loaders.foo' - self.assertTrue(worker.startup_info()) - - from celery.loaders.app import AppLoader - self.app.loader = AppLoader(app=self.app) - self.assertTrue(worker.startup_info()) - - self.app.loader = prev_loader - worker.send_events = True - self.assertTrue(worker.startup_info()) - - # test when there are too few output lines - # to draft the ascii art onto - prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox'] - try: - self.assertTrue(worker.startup_info()) - finally: - cd.ARTLINES = prev - - @disable_stdouts - def test_run(self): - self.Worker(app=self.app).on_start() - self.Worker(app=self.app, purge=True).on_start() - worker = self.Worker(app=self.app) - worker.on_start() - - @disable_stdouts - def test_purge_messages(self): - self.Worker(app=self.app).purge_messages() - - @disable_stdouts - def test_init_queues(self): - app = self.app - c = app.conf - app.amqp.queues = app.amqp.Queues({ - 'celery': {'exchange': 'celery', - 'routing_key': 'celery'}, - 'video': {'exchange': 'video', - 'routing_key': 'video'}, - }) - worker = self.Worker(app=self.app) - worker.setup_queues(['video']) - self.assertIn('video', app.amqp.queues) - self.assertIn('video', app.amqp.queues.consume_from) - self.assertIn('celery', app.amqp.queues) - self.assertNotIn('celery', app.amqp.queues.consume_from) - - c.CELERY_CREATE_MISSING_QUEUES = False - del(app.amqp.queues) - with self.assertRaises(ImproperlyConfigured): - self.Worker(app=self.app).setup_queues(['image']) - del(app.amqp.queues) - c.CELERY_CREATE_MISSING_QUEUES = True - worker = self.Worker(app=self.app) - worker.setup_queues(['image']) - self.assertIn('image', app.amqp.queues.consume_from) - self.assertEqual( - Queue('image', Exchange('image'), routing_key='image'), - app.amqp.queues['image'], - ) - - @disable_stdouts - def test_autoscale_argument(self): - worker1 = self.Worker(app=self.app, autoscale='10,3') - self.assertListEqual(worker1.autoscale, [10, 3]) - worker2 = self.Worker(app=self.app, autoscale='10') - self.assertListEqual(worker2.autoscale, [10, 0]) - self.assert_no_logging_side_effect() - - def test_include_argument(self): - worker1 = self.Worker(app=self.app, include='os') - self.assertListEqual(worker1.include, ['os']) - worker2 = self.Worker(app=self.app, - include='os,sys') - self.assertListEqual(worker2.include, ['os', 'sys']) - self.Worker(app=self.app, include=['os', 'sys']) - - @disable_stdouts - def 
test_unknown_loglevel(self): - with self.assertRaises(SystemExit): - worker(app=self.app).run(loglevel='ALIEN') - worker1 = self.Worker(app=self.app, loglevel=0xFFFF) - self.assertEqual(worker1.loglevel, 0xFFFF) - - @disable_stdouts - @patch('os._exit') - def test_warns_if_running_as_privileged_user(self, _exit): - app = self.app - if app.IS_WINDOWS: - raise SkipTest('Not applicable on Windows') - - with patch('os.getuid') as getuid: - getuid.return_value = 0 - self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle'] - worker = self.Worker(app=self.app) - worker.on_start() - _exit.assert_called_with(1) - from celery import platforms - platforms.C_FORCE_ROOT = True - try: - with self.assertWarnsRegex( - RuntimeWarning, - r'absolutely not recommended'): - worker = self.Worker(app=self.app) - worker.on_start() - finally: - platforms.C_FORCE_ROOT = False - self.app.conf.CELERY_ACCEPT_CONTENT = ['json'] - with self.assertWarnsRegex( - RuntimeWarning, - r'absolutely not recommended'): - worker = self.Worker(app=self.app) - worker.on_start() - - @disable_stdouts - def test_redirect_stdouts(self): - self.Worker(app=self.app, redirect_stdouts=False) - with self.assertRaises(AttributeError): - sys.stdout.logger - - @disable_stdouts - def test_on_start_custom_logging(self): - self.app.log.redirect_stdouts = Mock() - worker = self.Worker(app=self.app, redirect_stdouts=True) - worker._custom_logging = True - worker.on_start() - self.assertFalse(self.app.log.redirect_stdouts.called) - - def test_setup_logging_no_color(self): - worker = self.Worker( - app=self.app, redirect_stdouts=False, no_color=True, - ) - prev, self.app.log.setup = self.app.log.setup, Mock() - try: - worker.setup_logging() - self.assertFalse(self.app.log.setup.call_args[1]['colorize']) - finally: - self.app.log.setup = prev - - @disable_stdouts - def test_startup_info_pool_is_str(self): - worker = self.Worker(app=self.app, redirect_stdouts=False) - worker.pool_cls = 'foo' - worker.startup_info() - - def test_redirect_stdouts_already_handled(self): - logging_setup = [False] - - @signals.setup_logging.connect - def on_logging_setup(**kwargs): - logging_setup[0] = True - - try: - worker = self.Worker(app=self.app, redirect_stdouts=False) - worker.app.log.already_setup = False - worker.setup_logging() - self.assertTrue(logging_setup[0]) - with self.assertRaises(AttributeError): - sys.stdout.logger - finally: - signals.setup_logging.disconnect(on_logging_setup) - - @disable_stdouts - def test_platform_tweaks_osx(self): - - class OSXWorker(Worker): - proxy_workaround_installed = False - - def osx_proxy_detection_workaround(self): - self.proxy_workaround_installed = True - - worker = OSXWorker(app=self.app, redirect_stdouts=False) - - def install_HUP_nosupport(controller): - controller.hup_not_supported_installed = True - - class Controller(object): - pass - - prev = cd.install_HUP_not_supported_handler - cd.install_HUP_not_supported_handler = install_HUP_nosupport - try: - worker.app.IS_OSX = True - controller = Controller() - worker.install_platform_tweaks(controller) - self.assertTrue(controller.hup_not_supported_installed) - self.assertTrue(worker.proxy_workaround_installed) - finally: - cd.install_HUP_not_supported_handler = prev - - @disable_stdouts - def test_general_platform_tweaks(self): - - restart_worker_handler_installed = [False] - - def install_worker_restart_handler(worker): - restart_worker_handler_installed[0] = True - - class Controller(object): - pass - - prev = cd.install_worker_restart_handler -
cd.install_worker_restart_handler = install_worker_restart_handler - try: - worker = self.Worker(app=self.app) - worker.app.IS_OSX = False - worker.install_platform_tweaks(Controller()) - self.assertTrue(restart_worker_handler_installed[0]) - finally: - cd.install_worker_restart_handler = prev - - @disable_stdouts - def test_on_consumer_ready(self): - worker_ready_sent = [False] - - @signals.worker_ready.connect - def on_worker_ready(**kwargs): - worker_ready_sent[0] = True - - self.Worker(app=self.app).on_consumer_ready(object()) - self.assertTrue(worker_ready_sent[0]) - - -class test_funs(WorkerAppCase): - - def test_active_thread_count(self): - self.assertTrue(cd.active_thread_count()) - - @disable_stdouts - def test_set_process_status(self): - try: - __import__('setproctitle') - except ImportError: - raise SkipTest('setproctitle not installed') - worker = Worker(app=self.app, hostname='xyzza') - prev1, sys.argv = sys.argv, ['Arg0'] - try: - st = worker.set_process_status('Running') - self.assertIn('celeryd', st) - self.assertIn('xyzza', st) - self.assertIn('Running', st) - prev2, sys.argv = sys.argv, ['Arg0', 'Arg1'] - try: - st = worker.set_process_status('Running') - self.assertIn('celeryd', st) - self.assertIn('xyzza', st) - self.assertIn('Running', st) - self.assertIn('Arg1', st) - finally: - sys.argv = prev2 - finally: - sys.argv = prev1 - - @disable_stdouts - def test_parse_options(self): - cmd = worker() - cmd.app = self.app - opts, args = cmd.parse_options('worker', ['--concurrency=512', - '--heartbeat-interval=10']) - self.assertEqual(opts.concurrency, 512) - self.assertEqual(opts.heartbeat_interval, 10) - - @disable_stdouts - def test_main(self): - p, cd.Worker = cd.Worker, Worker - s, sys.argv = sys.argv, ['worker', '--discard'] - try: - worker_main(app=self.app) - finally: - cd.Worker = p - sys.argv = s - - -class test_signal_handlers(WorkerAppCase): - - class _Worker(object): - stopped = False - terminated = False - - def stop(self, in_sighandler=False): - self.stopped = True - - def terminate(self, in_sighandler=False): - self.terminated = True - - def psig(self, fun, *args, **kwargs): - handlers = {} - - class Signals(platforms.Signals): - def __setitem__(self, sig, handler): - handlers[sig] = handler - - p, platforms.signals = platforms.signals, Signals() - try: - fun(*args, **kwargs) - return handlers - finally: - platforms.signals = p - - @disable_stdouts - def test_worker_int_handler(self): - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - next_handlers = {} - state.should_stop = False - state.should_terminate = False - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): - next_handlers[sig] = handler - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - p, platforms.signals = platforms.signals, Signals() - try: - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_stop) - finally: - platforms.signals = p - state.should_stop = False - - try: - next_handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - p, platforms.signals = platforms.signals, Signals() - try: - with self.assertRaises(WorkerShutdown): - handlers['SIGINT']('SIGINT', object()) - finally: - platforms.signals = p - - with self.assertRaises(WorkerTerminate): - next_handlers['SIGINT']('SIGINT', object()) - - @disable_stdouts - def 
test_worker_int_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_stop) - finally: - process.name = name - state.should_stop = False - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - with self.assertRaises(WorkerShutdown): - handlers['SIGINT']('SIGINT', object()) - finally: - process.name = name - state.should_stop = False - - @disable_stdouts - def test_install_HUP_not_supported_handler(self): - worker = self._Worker() - handlers = self.psig(cd.install_HUP_not_supported_handler, worker) - handlers['SIGHUP']('SIGHUP', object()) - - @disable_stdouts - def test_worker_term_hard_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - try: - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig( - cd.install_worker_term_hard_handler, worker) - try: - handlers['SIGQUIT']('SIGQUIT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig( - cd.install_worker_term_hard_handler, worker) - with self.assertRaises(WorkerTerminate): - handlers['SIGQUIT']('SIGQUIT', object()) - finally: - process.name = name - - @disable_stdouts - def test_worker_term_handler_when_threads(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - try: - handlers['SIGTERM']('SIGTERM', object()) - self.assertTrue(state.should_stop) - finally: - state.should_stop = False - - @disable_stdouts - def test_worker_term_handler_when_single_thread(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - try: - with self.assertRaises(WorkerShutdown): - handlers['SIGTERM']('SIGTERM', object()) - finally: - state.should_stop = False - - @patch('sys.__stderr__') - @skip_if_pypy - @skip_if_jython - def test_worker_cry_handler(self, stderr): - handlers = self.psig(cd.install_cry_handler) - self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object())) - self.assertTrue(stderr.write.called) - - @disable_stdouts - def test_worker_term_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - try: - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - handlers['SIGTERM']('SIGTERM', object()) - self.assertTrue(state.should_stop) - with 
patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - with self.assertRaises(WorkerShutdown): - handlers['SIGTERM']('SIGTERM', object()) - finally: - process.name = name - state.should_stop = False - - @disable_stdouts - @patch('celery.platforms.close_open_fds') - @patch('atexit.register') - @patch('os.close') - def test_worker_restart_handler(self, _close, register, close_open): - if getattr(os, 'execv', None) is None: - raise SkipTest('platform does not have execv') - argv = [] - - def _execv(*args): - argv.extend(args) - - execv, os.execv = os.execv, _execv - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_restart_handler, worker) - handlers['SIGHUP']('SIGHUP', object()) - self.assertTrue(state.should_stop) - self.assertTrue(register.called) - callback = register.call_args[0][0] - callback() - self.assertTrue(argv) - finally: - os.execv = execv - state.should_stop = False - - @disable_stdouts - def test_worker_term_hard_handler_when_threaded(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_hard_handler, worker) - try: - handlers['SIGQUIT']('SIGQUIT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - - @disable_stdouts - def test_worker_term_hard_handler_when_single_threaded(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_hard_handler, worker) - with self.assertRaises(WorkerTerminate): - handlers['SIGQUIT']('SIGQUIT', object()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/case.py deleted file mode 100644 index a9e65cd..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/case.py +++ /dev/null @@ -1,880 +0,0 @@ -from __future__ import absolute_import - -try: - import unittest # noqa - unittest.skip - from unittest.util import safe_repr, unorderable_list_difference -except AttributeError: - import unittest2 as unittest # noqa - from unittest2.util import safe_repr, unorderable_list_difference # noqa - -import importlib -import inspect -import logging -import numbers -import os -import platform -import re -import sys -import threading -import time -import types -import warnings - -from contextlib import contextmanager -from copy import deepcopy -from datetime import datetime, timedelta -from functools import partial, wraps -from types import ModuleType - -try: - from unittest import mock -except ImportError: - import mock # noqa -from nose import SkipTest -from kombu import Queue -from kombu.log import NullHandler -from kombu.utils import nested, symbol_by_name - -from celery import Celery -from celery.app import current_app -from celery.backends.cache import CacheBackend, DummyClient -from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning -from celery.five import ( - WhateverIO, builtins, items, reraise, - string_t, values, open_fqdn, -) -from celery.utils.functional import noop -from celery.utils.imports import qualname - -__all__ = [ - 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', - 'patch', 'call', 'sentinel', 'skip_unless_module', - 'wrap_logger', 'with_environ', 'sleepdeprived', - 'skip_if_environ', 'todo', 'skip', 'skip_if', - 'skip_unless', 'mask_modules',
'override_stdouts', 'mock_module', - 'replace_module_value', 'sys_platform', 'reset_modules', - 'patch_modules', 'mock_context', 'mock_open', 'patch_many', - 'assert_signal_called', 'skip_if_pypy', - 'skip_if_jython', 'body_from_sig', 'restore_logging', -] -patch = mock.patch -call = mock.call -sentinel = mock.sentinel -MagicMock = mock.MagicMock -ANY = mock.ANY - -PY3 = sys.version_info[0] == 3 - -CASE_REDEFINES_SETUP = """\ -{name} (subclass of AppCase) redefines private "setUp", should be: "setup"\ -""" -CASE_REDEFINES_TEARDOWN = """\ -{name} (subclass of AppCase) redefines private "tearDown", \ -should be: "teardown"\ -""" -CASE_LOG_REDIRECT_EFFECT = """\ -Test {0} did not disable LoggingProxy for {1}\ -""" -CASE_LOG_LEVEL_EFFECT = """\ -Test {0} Modified the level of the root logger\ -""" -CASE_LOG_HANDLER_EFFECT = """\ -Test {0} Modified handlers for the root logger\ -""" - -CELERY_TEST_CONFIG = { - #: Don't want log output when running suite. - 'CELERYD_HIJACK_ROOT_LOGGER': False, - 'CELERY_SEND_TASK_ERROR_EMAILS': False, - 'CELERY_DEFAULT_QUEUE': 'testcelery', - 'CELERY_DEFAULT_EXCHANGE': 'testcelery', - 'CELERY_DEFAULT_ROUTING_KEY': 'testcelery', - 'CELERY_QUEUES': ( - Queue('testcelery', routing_key='testcelery'), - ), - 'CELERY_ENABLE_UTC': True, - 'CELERY_TIMEZONE': 'UTC', - 'CELERYD_LOG_COLOR': False, - - # Mongo results tests (only executed if installed and running) - 'CELERY_MONGODB_BACKEND_SETTINGS': { - 'host': os.environ.get('MONGO_HOST') or 'localhost', - 'port': os.environ.get('MONGO_PORT') or 27017, - 'database': os.environ.get('MONGO_DB') or 'celery_unittests', - 'taskmeta_collection': (os.environ.get('MONGO_TASKMETA_COLLECTION') or - 'taskmeta_collection'), - 'user': os.environ.get('MONGO_USER'), - 'password': os.environ.get('MONGO_PASSWORD'), - } -} - - -class Trap(object): - - def __getattr__(self, name): - raise RuntimeError('Test depends on current_app') - - -class UnitLogging(symbol_by_name(Celery.log_cls)): - - def __init__(self, *args, **kwargs): - super(UnitLogging, self).__init__(*args, **kwargs) - self.already_setup = True - - -def UnitApp(name=None, broker=None, backend=None, - set_as_current=False, log=UnitLogging, **kwargs): - - app = Celery(name or 'celery.tests', - broker=broker or 'memory://', - backend=backend or 'cache+memory://', - set_as_current=set_as_current, - log=log, - **kwargs) - app.add_defaults(deepcopy(CELERY_TEST_CONFIG)) - return app - - -class Mock(mock.Mock): - - def __init__(self, *args, **kwargs): - attrs = kwargs.pop('attrs', None) or {} - super(Mock, self).__init__(*args, **kwargs) - for attr_name, attr_value in items(attrs): - setattr(self, attr_name, attr_value) - - -class _ContextMock(Mock): - """Dummy class implementing __enter__ and __exit__ - as the with statement requires these to be implemented - in the class, not just the instance.""" - - def __enter__(self): - pass - - def __exit__(self, *exc_info): - pass - - -def ContextMock(*args, **kwargs): - obj = _ContextMock(*args, **kwargs) - obj.attach_mock(_ContextMock(), '__enter__') - obj.attach_mock(_ContextMock(), '__exit__') - obj.__enter__.return_value = obj - # if __exit__ return a value the exception is ignored, - # so it must return None here. 
- obj.__exit__.return_value = None - return obj - - -def _bind(f, o): - @wraps(f) - def bound_meth(*fargs, **fkwargs): - return f(o, *fargs, **fkwargs) - return bound_meth - - -if PY3: # pragma: no cover - def _get_class_fun(meth): - return meth -else: - def _get_class_fun(meth): - return meth.__func__ - - -class MockCallbacks(object): - - def __new__(cls, *args, **kwargs): - r = Mock(name=cls.__name__) - _get_class_fun(cls.__init__)(r, *args, **kwargs) - for key, value in items(vars(cls)): - if key not in ('__dict__', '__weakref__', '__new__', '__init__'): - if inspect.ismethod(value) or inspect.isfunction(value): - r.__getattr__(key).side_effect = _bind(value, r) - else: - r.__setattr__(key, value) - return r - - -def skip_unless_module(module): - - def _inner(fun): - - @wraps(fun) - def __inner(*args, **kwargs): - try: - importlib.import_module(module) - except ImportError: - raise SkipTest('Does not have %s' % (module, )) - - return fun(*args, **kwargs) - - return __inner - return _inner - - -# -- adds assertWarns from recent unittest2, not in Python 2.7. - -class _AssertRaisesBaseContext(object): - - def __init__(self, expected, test_case, callable_obj=None, - expected_regex=None): - self.expected = expected - self.failureException = test_case.failureException - self.obj_name = None - if isinstance(expected_regex, string_t): - expected_regex = re.compile(expected_regex) - self.expected_regex = expected_regex - - -def _is_magic_module(m): - # some libraries create custom module types that are lazily - # loaded, e.g. Django installs some modules in sys.modules that - # will load _tkinter and other shit when touched. - - # pyflakes refuses to accept 'noqa' for this isinstance. - cls, modtype = type(m), types.ModuleType - try: - variables = vars(cls) - except TypeError: - return True - else: - return (cls is not modtype and ( - '__getattr__' in variables or - '__getattribute__' in variables)) - - -class _AssertWarnsContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertWarns* methods.""" - - def __enter__(self): - # The __warningregistry__'s need to be in a pristine state for tests - # to work properly. - warnings.resetwarnings() - for v in list(values(sys.modules)): - # do not evaluate Django moved modules and other lazily - # initialized modules.
- if v and not _is_magic_module(v): - # use raw __getattribute__ to protect even better from - # lazily loaded modules - try: - object.__getattribute__(v, '__warningregistry__') - except AttributeError: - pass - else: - object.__setattr__(v, '__warningregistry__', {}) - self.warnings_manager = warnings.catch_warnings(record=True) - self.warnings = self.warnings_manager.__enter__() - warnings.simplefilter('always', self.expected) - return self - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - first_matching = None - for m in self.warnings: - w = m.message - if not isinstance(w, self.expected): - continue - if first_matching is None: - first_matching = w - if (self.expected_regex is not None and - not self.expected_regex.search(str(w))): - continue - # store warning for later retrieval - self.warning = w - self.filename = m.filename - self.lineno = m.lineno - return - # Now we simply try to choose a helpful failure message - if first_matching is not None: - raise self.failureException( - '%r does not match %r' % ( - self.expected_regex.pattern, str(first_matching))) - if self.obj_name: - raise self.failureException( - '%s not triggered by %s' % (exc_name, self.obj_name)) - else: - raise self.failureException('%s not triggered' % exc_name) - - -class Case(unittest.TestCase): - - def assertWarns(self, expected_warning): - return _AssertWarnsContext(expected_warning, self, None) - - def assertWarnsRegex(self, expected_warning, expected_regex): - return _AssertWarnsContext(expected_warning, self, - None, expected_regex) - - @contextmanager - def assertDeprecated(self): - with self.assertWarnsRegex(CDeprecationWarning, - r'scheduled for removal'): - yield - - @contextmanager - def assertPendingDeprecation(self): - with self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): - yield - - def assertDictContainsSubset(self, expected, actual, msg=None): - missing, mismatched = [], [] - - for key, value in items(expected): - if key not in actual: - missing.append(key) - elif value != actual[key]: - mismatched.append('%s, expected: %s, actual: %s' % ( - safe_repr(key), safe_repr(value), - safe_repr(actual[key]))) - - if not (missing or mismatched): - return - - standard_msg = '' - if missing: - standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing)) - - if mismatched: - if standard_msg: - standard_msg += '; ' - standard_msg += 'Mismatched values: %s' % ( - ','.join(mismatched)) - - self.fail(self._formatMessage(msg, standard_msg)) - - def assertItemsEqual(self, expected_seq, actual_seq, msg=None): - missing = unexpected = None - try: - expected = sorted(expected_seq) - actual = sorted(actual_seq) - except TypeError: - # Unsortable items (example: set(), complex(), ...) 
- expected = list(expected_seq) - actual = list(actual_seq) - missing, unexpected = unorderable_list_difference( - expected, actual) - else: - return self.assertSequenceEqual(expected, actual, msg=msg) - - errors = [] - if missing: - errors.append( - 'Expected, but missing:\n %s' % (safe_repr(missing), ) - ) - if unexpected: - errors.append( - 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ) - ) - if errors: - standardMsg = '\n'.join(errors) - self.fail(self._formatMessage(msg, standardMsg)) - - -def depends_on_current_app(fun): - if inspect.isclass(fun): - fun.contained = False - else: - @wraps(fun) - def __inner(self, *args, **kwargs): - self.app.set_current() - return fun(self, *args, **kwargs) - return __inner - - -class AppCase(Case): - contained = True - - def __init__(self, *args, **kwargs): - super(AppCase, self).__init__(*args, **kwargs) - if self.__class__.__dict__.get('setUp'): - raise RuntimeError( - CASE_REDEFINES_SETUP.format(name=qualname(self)), - ) - if self.__class__.__dict__.get('tearDown'): - raise RuntimeError( - CASE_REDEFINES_TEARDOWN.format(name=qualname(self)), - ) - - def Celery(self, *args, **kwargs): - return UnitApp(*args, **kwargs) - - def setUp(self): - self._threads_at_setup = list(threading.enumerate()) - from celery import _state - from celery import result - result.task_join_will_block = \ - _state.task_join_will_block = lambda: False - self._current_app = current_app() - self._default_app = _state.default_app - trap = Trap() - self._prev_tls = _state._tls - _state.set_default_app(trap) - - class NonTLS(object): - current_app = trap - _state._tls = NonTLS() - - self.app = self.Celery(set_as_current=False) - if not self.contained: - self.app.set_current() - root = logging.getLogger() - self.__rootlevel = root.level - self.__roothandlers = root.handlers - _state._set_task_join_will_block(False) - try: - self.setup() - except: - self._teardown_app() - raise - - def _teardown_app(self): - from celery.utils.log import LoggingProxy - assert sys.stdout - assert sys.stderr - assert sys.__stdout__ - assert sys.__stderr__ - this = self._get_test_name() - if isinstance(sys.stdout, (LoggingProxy, Mock)) or \ - isinstance(sys.__stdout__, (LoggingProxy, Mock)): - raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) - if isinstance(sys.stderr, (LoggingProxy, Mock)) or \ - isinstance(sys.__stderr__, (LoggingProxy, Mock)): - raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr')) - backend = self.app.__dict__.get('backend') - if backend is not None: - if isinstance(backend, CacheBackend): - if isinstance(backend.client, DummyClient): - backend.client.cache.clear() - backend._cache.clear() - from celery import _state - _state._set_task_join_will_block(False) - - _state.set_default_app(self._default_app) - _state._tls = self._prev_tls - _state._tls.current_app = self._current_app - if self.app is not self._current_app: - self.app.close() - self.app = None - self.assertEqual( - self._threads_at_setup, list(threading.enumerate()), - ) - - def _get_test_name(self): - return '.'.join([self.__class__.__name__, self._testMethodName]) - - def tearDown(self): - try: - self.teardown() - finally: - self._teardown_app() - self.assert_no_logging_side_effect() - - def assert_no_logging_side_effect(self): - this = self._get_test_name() - root = logging.getLogger() - if root.level != self.__rootlevel: - raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) - if root.handlers != self.__roothandlers: - raise 
-
-    def setup(self):
-        pass
-
-    def teardown(self):
-        pass
-
-
-def get_handlers(logger):
-    return [h for h in logger.handlers if not isinstance(h, NullHandler)]
-
-
-@contextmanager
-def wrap_logger(logger, loglevel=logging.ERROR):
-    old_handlers = get_handlers(logger)
-    sio = WhateverIO()
-    siohandler = logging.StreamHandler(sio)
-    logger.handlers = [siohandler]
-
-    try:
-        yield sio
-    finally:
-        logger.handlers = old_handlers
-
-
-def with_environ(env_name, env_value):
-
-    def _envpatched(fun):
-
-        @wraps(fun)
-        def _patch_environ(*args, **kwargs):
-            prev_val = os.environ.get(env_name)
-            os.environ[env_name] = env_value
-            try:
-                return fun(*args, **kwargs)
-            finally:
-                os.environ[env_name] = prev_val or ''
-
-        return _patch_environ
-    return _envpatched
-
-
-def sleepdeprived(module=time):
-
-    def _sleepdeprived(fun):
-
-        @wraps(fun)
-        def __sleepdeprived(*args, **kwargs):
-            old_sleep = module.sleep
-            module.sleep = noop
-            try:
-                return fun(*args, **kwargs)
-            finally:
-                module.sleep = old_sleep
-
-        return __sleepdeprived
-
-    return _sleepdeprived
-
-
-def skip_if_environ(env_var_name):
-
-    def _wrap_test(fun):
-
-        @wraps(fun)
-        def _skips_if_environ(*args, **kwargs):
-            if os.environ.get(env_var_name):
-                raise SkipTest('SKIP %s: %s set\n' % (
-                    fun.__name__, env_var_name))
-            return fun(*args, **kwargs)
-
-        return _skips_if_environ
-
-    return _wrap_test
-
-
-def _skip_test(reason, sign):
-
-    def _wrap_test(fun):
-
-        @wraps(fun)
-        def _skipped_test(*args, **kwargs):
-            raise SkipTest('%s: %s' % (sign, reason))
-
-        return _skipped_test
-    return _wrap_test
-
-
-def todo(reason):
-    """TODO test decorator."""
-    return _skip_test(reason, 'TODO')
-
-
-def skip(reason):
-    """Skip test decorator."""
-    return _skip_test(reason, 'SKIP')
-
-
-def skip_if(predicate, reason):
-    """Skip test if predicate is :const:`True`."""
-
-    def _inner(fun):
-        return predicate and skip(reason)(fun) or fun
-
-    return _inner
-
-
-def skip_unless(predicate, reason):
-    """Skip test if predicate is :const:`False`."""
-    return skip_if(not predicate, reason)
-
-
-# Taken from
-# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
-@contextmanager
-def mask_modules(*modnames):
-    """Ban some modules from being importable inside the context
-
-    For example:
-
-        >>> with mask_modules('sys'):
-        ...     try:
-        ...         import sys
-        ...     except ImportError:
-        ...         print('sys not found')
-        sys not found
-
-        >>> import sys  # noqa
-        >>> sys.version
-        (2, 5, 2, 'final', 0)
-
-    """
-
-    realimport = builtins.__import__
-
-    def myimp(name, *args, **kwargs):
-        if name in modnames:
-            raise ImportError('No module named %s' % name)
-        else:
-            return realimport(name, *args, **kwargs)
-
-    builtins.__import__ = myimp
-    try:
-        yield True
-    finally:
-        builtins.__import__ = realimport
-
-
-@contextmanager
-def override_stdouts():
-    """Override `sys.stdout` and `sys.stderr` with `WhateverIO`."""
-    prev_out, prev_err = sys.stdout, sys.stderr
-    prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__
-    mystdout, mystderr = WhateverIO(), WhateverIO()
-    sys.stdout = sys.__stdout__ = mystdout
-    sys.stderr = sys.__stderr__ = mystderr
-
-    try:
-        yield mystdout, mystderr
-    finally:
-        sys.stdout = prev_out
-        sys.stderr = prev_err
-        sys.__stdout__ = prev_rout
-        sys.__stderr__ = prev_rerr
-
-
-def disable_stdouts(fun):
-
-    @wraps(fun)
-    def disable(*args, **kwargs):
-        with override_stdouts():
-            return fun(*args, **kwargs)
-    return disable
-
-
-def _old_patch(module, name, mocked):
-    module = importlib.import_module(module)
-
-    def _patch(fun):
-
-        @wraps(fun)
-        def __patched(*args, **kwargs):
-            prev = getattr(module, name)
-            setattr(module, name, mocked)
-            try:
-                return fun(*args, **kwargs)
-            finally:
-                setattr(module, name, prev)
-        return __patched
-    return _patch
-
-
-@contextmanager
-def replace_module_value(module, name, value=None):
-    has_prev = hasattr(module, name)
-    prev = getattr(module, name, None)
-    if value:
-        setattr(module, name, value)
-    else:
-        try:
-            delattr(module, name)
-        except AttributeError:
-            pass
-    try:
-        yield
-    finally:
-        if prev is not None:
-            setattr(module, name, prev)
-        if not has_prev:
-            try:
-                delattr(module, name)
-            except AttributeError:
-                pass
-pypy_version = partial(
-    replace_module_value, sys, 'pypy_version_info',
-)
-platform_pyimp = partial(
-    replace_module_value, platform, 'python_implementation',
-)
-
-
-@contextmanager
-def sys_platform(value):
-    prev, sys.platform = sys.platform, value
-    try:
-        yield
-    finally:
-        sys.platform = prev
-
-
-@contextmanager
-def reset_modules(*modules):
-    prev = dict((k, sys.modules.pop(k)) for k in modules if k in sys.modules)
-    try:
-        yield
-    finally:
-        sys.modules.update(prev)
-
-
-@contextmanager
-def patch_modules(*modules):
-    prev = {}
-    for mod in modules:
-        prev[mod] = sys.modules.get(mod)
-        sys.modules[mod] = ModuleType(mod)
-    try:
-        yield
-    finally:
-        for name, mod in items(prev):
-            if mod is None:
-                sys.modules.pop(name, None)
-            else:
-                sys.modules[name] = mod
-
-
-@contextmanager
-def mock_module(*names):
-    prev = {}
-
-    class MockModule(ModuleType):
-
-        def __getattr__(self, attr):
-            setattr(self, attr, Mock())
-            return ModuleType.__getattribute__(self, attr)
-
-    mods = []
-    for name in names:
-        try:
-            prev[name] = sys.modules[name]
-        except KeyError:
-            pass
-        mod = sys.modules[name] = MockModule(name)
-        mods.append(mod)
-    try:
-        yield mods
-    finally:
-        for name in names:
-            try:
-                sys.modules[name] = prev[name]
-            except KeyError:
-                try:
-                    del(sys.modules[name])
-                except KeyError:
-                    pass
-
-
-@contextmanager
-def mock_context(mock, typ=Mock):
-    context = mock.return_value = Mock()
-    context.__enter__ = typ()
-    context.__exit__ = typ()
-
-    def on_exit(*x):
-        if x[0]:
-            reraise(x[0], x[1], x[2])
-    context.__exit__.side_effect = on_exit
-    context.__enter__.return_value = context
-    try:
-        yield context
-    finally:
-        context.reset()
-
-
-@contextmanager
-def mock_open(typ=WhateverIO, side_effect=None):
-    with patch(open_fqdn) as open_:
-        with mock_context(open_) as context:
-            if side_effect is not None:
-                context.__enter__.side_effect = side_effect
-            val = context.__enter__.return_value = typ()
-            val.__exit__ = Mock()
-            yield val
-
-
-def patch_many(*targets):
-    return nested(*[patch(target) for target in targets])
-
-
-@contextmanager
-def assert_signal_called(signal, **expected):
-    handler = Mock()
-    call_handler = partial(handler)
-    signal.connect(call_handler)
-    try:
-        yield handler
-    finally:
-        signal.disconnect(call_handler)
-    handler.assert_called_with(signal=signal, **expected)
-
-
-def skip_if_pypy(fun):
-
-    @wraps(fun)
-    def _inner(*args, **kwargs):
-        if getattr(sys, 'pypy_version_info', None):
-            raise SkipTest('does not work on PyPy')
-        return fun(*args, **kwargs)
-    return _inner
-
-
-def skip_if_jython(fun):
-
-    @wraps(fun)
-    def _inner(*args, **kwargs):
-        if sys.platform.startswith('java'):
-            raise SkipTest('does not work on Jython')
-        return fun(*args, **kwargs)
-    return _inner
-
-
-def body_from_sig(app, sig, utc=True):
-    sig.freeze()
-    callbacks = sig.options.pop('link', None)
-    errbacks = sig.options.pop('link_error', None)
-    countdown = sig.options.pop('countdown', None)
-    if countdown:
-        eta = app.now() + timedelta(seconds=countdown)
-    else:
-        eta = sig.options.pop('eta', None)
-    if eta and isinstance(eta, datetime):
-        eta = eta.isoformat()
-    expires = sig.options.pop('expires', None)
-    if expires and isinstance(expires, numbers.Real):
-        expires = app.now() + timedelta(seconds=expires)
-    if expires and isinstance(expires, datetime):
-        expires = expires.isoformat()
-    return {
-        'task': sig.task,
-        'id': sig.id,
-        'args': sig.args,
-        'kwargs': sig.kwargs,
-        'callbacks': [dict(s) for s in callbacks] if callbacks else None,
-        'errbacks': [dict(s) for s in errbacks] if errbacks else None,
-        'eta': eta,
-        'utc': utc,
-        'expires': expires,
-    }
-
-
-@contextmanager
-def restore_logging():
-    outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__
-    root = logging.getLogger()
-    level = root.level
-    handlers = root.handlers
-
-    try:
-        yield
-    finally:
-        sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
-        root.level = level
-        root.handlers[:] = handlers
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py
deleted file mode 100644
index 02c7f7d..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import absolute_import
-
-from datetime import timedelta
-
-import sys
-sys.modules.pop('celery.task', None)
-
-from celery.schedules import schedule  # noqa
-from celery.task import (  # noqa
-    periodic_task,
-    PeriodicTask
-)
-from celery.utils.timeutils import timedelta_seconds  # noqa
-
-from celery.tests.case import AppCase, depends_on_current_app  # noqa
-
-
-class test_Task(AppCase):
-
-    def test_base_task_inherits_magic_kwargs_from_app(self):
-        from celery.task import Task as OldTask
-
-        class timkX(OldTask):
-            abstract = True
-
-        with self.Celery(set_as_current=False,
-                         accept_magic_kwargs=True) as app:
-            timkX.bind(app)
-            # see #918
-            self.assertFalse(timkX.accept_magic_kwargs)
-
-            from celery import Task as NewTask
-
-            class timkY(NewTask):
-                abstract = True
-
-            timkY.bind(app)
-            self.assertFalse(timkY.accept_magic_kwargs)
-
-
-@depends_on_current_app
-class test_periodic_tasks(AppCase):
-
-    def setup(self):
-        @periodic_task(app=self.app, shared=False,
-                       run_every=schedule(timedelta(hours=1), app=self.app))
-        def my_periodic():
-            pass
-        self.my_periodic = my_periodic
-
-    def now(self):
-        return self.app.now()
-
-    def test_must_have_run_every(self):
-        with self.assertRaises(NotImplementedError):
-            type('Foo', (PeriodicTask, ), {'__module__': __name__})
-
-    def test_remaining_estimate(self):
-        s = self.my_periodic.run_every
-        self.assertIsInstance(
-            s.remaining_estimate(s.maybe_make_aware(self.now())),
-            timedelta)
-
-    def test_is_due_not_due(self):
-        due, remaining = self.my_periodic.run_every.is_due(self.now())
-        self.assertFalse(due)
-        # This assertion may fail if executed in the
-        # first minute of an hour, thus 59 instead of 60
-        self.assertGreater(remaining, 59)
-
-    def test_is_due(self):
-        p = self.my_periodic
-        due, remaining = p.run_every.is_due(
-            self.now() - p.run_every.run_every,
-        )
-        self.assertTrue(due)
-        self.assertEqual(remaining,
-                         timedelta_seconds(p.run_every.run_every))
-
-    def test_schedule_repr(self):
-        p = self.my_periodic
-        self.assertTrue(repr(p.run_every))
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py
deleted file mode 100644
index b041a0b..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from __future__ import absolute_import
-
-import celery
-
-from celery.app.task import Task as ModernTask
-from celery.task.base import Task as CompatTask
-
-from celery.tests.case import AppCase, depends_on_current_app
-
-
-@depends_on_current_app
-class test_MagicModule(AppCase):
-
-    def test_class_property_set_without_type(self):
-        self.assertTrue(ModernTask.__dict__['app'].__get__(CompatTask()))
-
-    def test_class_property_set_on_class(self):
-        self.assertIs(ModernTask.__dict__['app'].__set__(None, None),
-                      ModernTask.__dict__['app'])
-
-    def test_class_property_set(self):
-
-        class X(CompatTask):
-            pass
-        ModernTask.__dict__['app'].__set__(X(), self.app)
-        self.assertIs(X.app, self.app)
-
-    def test_dir(self):
-        self.assertTrue(dir(celery.messaging))
-
-    def test_direct(self):
-        self.assertTrue(celery.task)
-
-    def test_app_attrs(self):
-        self.assertEqual(celery.task.control.broadcast,
-                         celery.current_app.control.broadcast)
-
-    def test_decorators_task(self):
-        @celery.decorators.task
-        def _test_decorators_task():
-            pass
-
-        self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
-    def test_decorators_periodic_task(self):
-        @celery.decorators.periodic_task(run_every=3600)
-        def _test_decorators_ptask():
-            pass
-
-        self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py
deleted file mode 100644
index 9f5dff9..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from __future__ import absolute_import
-
-import warnings
-
-from celery.task import base
-
-from celery.tests.case import AppCase, depends_on_current_app
-
-
-def add(x, y):
-    return x + y
-
-
-@depends_on_current_app
-class test_decorators(AppCase):
-
-    def test_task_alias(self):
-        from celery import task
-        self.assertTrue(task.__file__)
-        self.assertTrue(task(add))
-
-    def setup(self):
-        with warnings.catch_warnings(record=True):
-            from celery import decorators
-        self.decorators = decorators
-
-    def assertCompatDecorator(self, decorator, type, **opts):
-        task = decorator(**opts)(add)
-        self.assertEqual(task(8, 8), 16)
-        self.assertTrue(task.accept_magic_kwargs)
-        self.assertIsInstance(task, type)
-
-    def test_task(self):
-        self.assertCompatDecorator(self.decorators.task, base.BaseTask)
-
-    def test_periodic_task(self):
-        self.assertCompatDecorator(self.decorators.periodic_task,
-                                   base.BaseTask,
-                                   run_every=1)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py
deleted file mode 100644
index 08505f8..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import, unicode_literals
-
-from contextlib import contextmanager
-from functools import wraps
-try:
-    from urllib import addinfourl
-except ImportError:  # py3k
-    from urllib.request import addinfourl  # noqa
-
-from anyjson import dumps
-from kombu.utils.encoding import from_utf8
-
-from celery.five import WhateverIO, items
-from celery.task import http
-from celery.tests.case import AppCase, Case
-
-
-@contextmanager
-def mock_urlopen(response_method):
-
-    urlopen = http.urlopen
-
-    @wraps(urlopen)
-    def _mocked(url, *args, **kwargs):
-        response_data, headers = response_method(url)
-        return addinfourl(WhateverIO(response_data), headers, url)
-
-    http.urlopen = _mocked
-
-    try:
-        yield True
-    finally:
-        http.urlopen = urlopen
-
-
-def _response(res):
-    return lambda r: (res, [])
-
-
-def success_response(value):
-    return _response(dumps({'status': 'success', 'retval': value}))
-
-
-def fail_response(reason):
-    return _response(dumps({'status': 'failure', 'reason': reason}))
-
-
-def unknown_response():
-    return _response(dumps({'status': 'u.u.u.u', 'retval': True}))
-
-
-class test_encodings(Case):
-
-    def test_utf8dict(self):
-        uk = 'foobar'
-        d = {'følelser ær langé': 'ærbadægzaååÆØÅ',
-             from_utf8(uk): from_utf8('xuzzybaz')}
-
-        for key, value in items(http.utf8dict(items(d))):
-            self.assertIsInstance(key, str)
-            self.assertIsInstance(value, str)
-
-
-class test_MutableURL(Case):
-
-    def test_url_query(self):
-        url = http.MutableURL('http://example.com?x=10&y=20&z=Foo')
-        self.assertDictContainsSubset({'x': '10',
-                                       'y': '20',
-                                       'z': 'Foo'}, url.query)
-        url.query['name'] = 'George'
-        url = http.MutableURL(str(url))
-        self.assertDictContainsSubset({'x': '10',
-                                       'y': '20',
-                                       'z': 'Foo',
-                                       'name': 'George'}, url.query)
-
-    def test_url_keeps_everything(self):
-        url = 'https://e.com:808/foo/bar#zeta?x=10&y=20'
-        url = http.MutableURL(url)
-
-        self.assertEqual(
-            str(url).split('?')[0],
-            'https://e.com:808/foo/bar#zeta',
-        )
-
-    def test___repr__(self):
-        url = http.MutableURL('http://e.com/foo/bar')
-        self.assertTrue(repr(url).startswith(' 50:
-                    return True
-                raise err
-            finally:
-                called[0] += 1
-        sock.return_value.bind.side_effect = effect
-        with Rdb(out=out):
-            pass
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py
deleted file mode 100644
index c8e6151..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import absolute_import
-
-from celery.tests.case import AppCase, SkipTest
-
-
-class MockWindow(object):
-
-    def getmaxyx(self):
-        return self.y, self.x
-
-
-class test_CursesDisplay(AppCase):
-
-    def setup(self):
-        try:
-            import curses  # noqa
-        except ImportError:
-            raise SkipTest('curses monitor requires curses')
-
-        from celery.events import cursesmon
-        self.monitor = cursesmon.CursesMonitor(object(), app=self.app)
-        self.win = MockWindow()
-        self.monitor.win = self.win
-
-    def test_format_row_with_default_widths(self):
-        self.win.x, self.win.y = 91, 24
-        row = self.monitor.format_row(
-            '783da208-77d0-40ca-b3d6-37dd6dbb55d3',
-            'task.task.task.task.task.task.task.task.task.tas',
-            'workerworkerworkerworkerworkerworkerworkerworker',
-            '21:13:20',
-            'SUCCESS')
-        self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 '
-                         'workerworker... task.task.[.]tas 21:13:20 SUCCESS ',
-                         row)
-
-    def test_format_row_with_truncated_uuid(self):
-        self.win.x, self.win.y = 80, 24
-        row = self.monitor.format_row(
-            '783da208-77d0-40ca-b3d6-37dd6dbb55d3',
-            'task.task.task.task.task.task.task.task.task.tas',
-            'workerworkerworkerworkerworkerworkerworkerworker',
-            '21:13:20',
-            'SUCCESS')
-        self.assertEqual('783da208-77d0-40ca-b3d... workerworker... '
-                         'task.task.[.]tas 21:13:20 SUCCESS ',
-                         row)
-
-    def test_format_title_row(self):
-        self.win.x, self.win.y = 80, 24
-        row = self.monitor.format_row('UUID', 'TASK',
-                                      'WORKER', 'TIME', 'STATE')
-        self.assertEqual('UUID WORKER '
-                         'TASK TIME STATE ',
-                         row)
-
-    def test_format_row_for_wide_screen_with_short_uuid(self):
-        self.win.x, self.win.y = 140, 24
-        row = self.monitor.format_row(
-            '783da208-77d0-40ca-b3d6-37dd6dbb55d3',
-            'task.task.task.task.task.task.task.task.task.tas',
-            'workerworkerworkerworkerworkerworkerworkerworker',
-            '21:13:20',
-            'SUCCESS')
-        self.assertEqual(136, len(row))
-        self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 '
-                         'workerworkerworkerworkerworkerworker... '
-                         'task.task.task.task.task.task.task.[.]tas '
-                         '21:13:20 SUCCESS ',
-                         row)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py
deleted file mode 100644
index 791f416..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py
+++ /dev/null
@@ -1,260 +0,0 @@
-from __future__ import absolute_import
-
-import socket
-
-from celery.events import Event
-from celery.tests.case import AppCase, Mock
-
-
-class MockProducer(object):
-    raise_on_publish = False
-
-    def __init__(self, *args, **kwargs):
-        self.sent = []
-
-    def publish(self, msg, *args, **kwargs):
-        if self.raise_on_publish:
-            raise KeyError()
-        self.sent.append(msg)
-
-    def close(self):
-        pass
-
-    def has_event(self, kind):
-        for event in self.sent:
-            if event['type'] == kind:
-                return event
-        return False
-
-
-class test_Event(AppCase):
-
-    def test_constructor(self):
-        event = Event('world war II')
-        self.assertEqual(event['type'], 'world war II')
-        self.assertTrue(event['timestamp'])
-
-
-class test_EventDispatcher(AppCase):
-
-    def test_redis_uses_fanout_exchange(self):
-        self.app.connection = Mock()
-        conn = self.app.connection.return_value = Mock()
-        conn.transport.driver_type = 'redis'
-
-        dispatcher = self.app.events.Dispatcher(conn, enabled=False)
-        self.assertEqual(dispatcher.exchange.type, 'fanout')
-
-    def test_others_use_topic_exchange(self):
-        self.app.connection = Mock()
-        conn = self.app.connection.return_value = Mock()
-        conn.transport.driver_type = 'amqp'
-        dispatcher = self.app.events.Dispatcher(conn, enabled=False)
-        self.assertEqual(dispatcher.exchange.type, 'topic')
-
-    def test_takes_channel_connection(self):
-        x = self.app.events.Dispatcher(channel=Mock())
-        self.assertIs(x.connection, x.channel.connection.client)
-
-    def test_sql_transports_disabled(self):
-        conn = Mock()
-        conn.transport.driver_type = 'sql'
-        x = self.app.events.Dispatcher(connection=conn)
-        self.assertFalse(x.enabled)
-
-    def test_send(self):
-        producer = MockProducer()
-        producer.connection = self.app.connection()
-        connection = Mock()
-        connection.transport.driver_type = 'amqp'
-        eventer = self.app.events.Dispatcher(connection, enabled=False,
-                                             buffer_while_offline=False)
-        eventer.producer = producer
-        eventer.enabled = True
-        eventer.send('World War II', ended=True)
-        self.assertTrue(producer.has_event('World War II'))
-        eventer.enabled = False
-        eventer.send('World War III')
-        self.assertFalse(producer.has_event('World War III'))
-
-        evs = ('Event 1', 'Event 2', 'Event 3')
-        eventer.enabled = True
-        eventer.producer.raise_on_publish = True
-        eventer.buffer_while_offline = False
-        with self.assertRaises(KeyError):
-            eventer.send('Event X')
-        eventer.buffer_while_offline = True
-        for ev in evs:
-            eventer.send(ev)
-        eventer.producer.raise_on_publish = False
-        eventer.flush()
-        for ev in evs:
-            self.assertTrue(producer.has_event(ev))
-
-        buf = eventer._outbound_buffer = Mock()
-        buf.popleft.side_effect = IndexError()
-        eventer.flush()
-
-    def test_enter_exit(self):
-        with self.app.connection() as conn:
-            d = self.app.events.Dispatcher(conn)
-            d.close = Mock()
-            with d as _d:
-                self.assertTrue(_d)
-            d.close.assert_called_with()
-
-    def test_enable_disable_callbacks(self):
-        on_enable = Mock()
-        on_disable = Mock()
-        with self.app.connection() as conn:
-            with self.app.events.Dispatcher(conn, enabled=False) as d:
-                d.on_enabled.add(on_enable)
-                d.on_disabled.add(on_disable)
-                d.enable()
-                on_enable.assert_called_with()
-                d.disable()
-                on_disable.assert_called_with()
-
-    def test_enabled_disable(self):
-        connection = self.app.connection()
-        channel = connection.channel()
-        try:
-            dispatcher = self.app.events.Dispatcher(connection,
-                                                    enabled=True)
-            dispatcher2 = self.app.events.Dispatcher(connection,
                                                     enabled=True,
-                                                     channel=channel)
-            self.assertTrue(dispatcher.enabled)
-            self.assertTrue(dispatcher.producer.channel)
-            self.assertEqual(dispatcher.producer.serializer,
-                             self.app.conf.CELERY_EVENT_SERIALIZER)
-
-            created_channel = dispatcher.producer.channel
-            dispatcher.disable()
-            dispatcher.disable()  # Disable with no active producer
-            dispatcher2.disable()
-            self.assertFalse(dispatcher.enabled)
-            self.assertIsNone(dispatcher.producer)
-            self.assertFalse(dispatcher2.channel.closed,
-                             'does not close manually provided channel')
-
-            dispatcher.enable()
-            self.assertTrue(dispatcher.enabled)
-            self.assertTrue(dispatcher.producer)
-
-            # XXX test compat attribute
-            self.assertIs(dispatcher.publisher, dispatcher.producer)
-            prev, dispatcher.publisher = dispatcher.producer, 42
-            try:
-                self.assertEqual(dispatcher.producer, 42)
-            finally:
-                dispatcher.producer = prev
-        finally:
-            channel.close()
-            connection.close()
-        self.assertTrue(created_channel.closed)
-
-
-class test_EventReceiver(AppCase):
-
-    def test_process(self):
-
-        message = {'type': 'world-war'}
-
-        got_event = [False]
-
-        def my_handler(event):
-            got_event[0] = True
-
-        connection = Mock()
-        connection.transport_cls = 'memory'
-        r = self.app.events.Receiver(
-            connection,
-            handlers={'world-war': my_handler},
-            node_id='celery.tests',
-        )
-        r._receive(message, object())
-        self.assertTrue(got_event[0])
-
-    def test_catch_all_event(self):
-
-        message = {'type': 'world-war'}
-
-        got_event = [False]
-
-        def my_handler(event):
-            got_event[0] = True
-
-        connection = Mock()
-        connection.transport_cls = 'memory'
-        r = self.app.events.Receiver(connection, node_id='celery.tests')
-        r.handlers['*'] = my_handler
-        r._receive(message, object())
-        self.assertTrue(got_event[0])
-
-    def test_itercapture(self):
-        connection = self.app.connection()
-        try:
-            r = self.app.events.Receiver(connection, node_id='celery.tests')
-            it = r.itercapture(timeout=0.0001, wakeup=False)
-
-            with self.assertRaises(socket.timeout):
-                next(it)
-
-            with self.assertRaises(socket.timeout):
-                r.capture(timeout=0.00001)
-        finally:
-            connection.close()
-
-    def test_event_from_message_localize_disabled(self):
-        r = self.app.events.Receiver(Mock(), node_id='celery.tests')
-        r.adjust_clock = Mock()
-        ts_adjust = Mock()
-
-        r.event_from_message(
-            {'type': 'worker-online', 'clock': 313},
-            localize=False,
-            adjust_timestamp=ts_adjust,
-        )
-        self.assertFalse(ts_adjust.called)
-        r.adjust_clock.assert_called_with(313)
-
-    def test_itercapture_limit(self):
-        connection = self.app.connection()
-        channel = connection.channel()
-        try:
-            events_received = [0]
-
-            def handler(event):
-                events_received[0] += 1
-
-            producer = self.app.events.Dispatcher(
-                connection, enabled=True, channel=channel,
-            )
-            r = self.app.events.Receiver(
-                connection,
-                handlers={'*': handler},
-                node_id='celery.tests',
-            )
-            evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5']
-            for ev in evs:
-                producer.send(ev)
-            it = r.itercapture(limit=4, wakeup=True)
-            next(it)  # skip consumer (see itercapture)
-            list(it)
-            self.assertEqual(events_received[0], 4)
-        finally:
-            channel.close()
-            connection.close()
-
-
-class test_misc(AppCase):
-
-    def test_State(self):
-        state = self.app.events.State()
-        self.assertDictEqual(dict(state.workers), {})
-
-    def test_default_dispatcher(self):
-        with self.app.events.default_dispatcher() as d:
-            self.assertTrue(d)
-            self.assertTrue(d.connection)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py
deleted file mode 100644
index f551751..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from __future__ import absolute_import
-
-from celery.events import Events
-from celery.events.snapshot import Polaroid, evcam
-from celery.tests.case import AppCase, patch, restore_logging
-
-
-class TRef(object):
-    active = True
-    called = False
-
-    def __call__(self):
-        self.called = True
-
-    def cancel(self):
-        self.active = False
-
-
-class MockTimer(object):
-    installed = []
-
-    def call_repeatedly(self, secs, fun, *args, **kwargs):
-        self.installed.append(fun)
-        return TRef()
-timer = MockTimer()
-
-
-class test_Polaroid(AppCase):
-
-    def setup(self):
-        self.state = self.app.events.State()
-
-    def test_constructor(self):
-        x = Polaroid(self.state, app=self.app)
-        self.assertIs(x.app, self.app)
-        self.assertIs(x.state, self.state)
-        self.assertTrue(x.freq)
-        self.assertTrue(x.cleanup_freq)
-        self.assertTrue(x.logger)
-        self.assertFalse(x.maxrate)
-
-    def test_install_timers(self):
-        x = Polaroid(self.state, app=self.app)
-        x.timer = timer
-        x.__exit__()
-        x.__enter__()
-        self.assertIn(x.capture, MockTimer.installed)
-        self.assertIn(x.cleanup, MockTimer.installed)
-        self.assertTrue(x._tref.active)
-        self.assertTrue(x._ctref.active)
-        x.__exit__()
-        self.assertFalse(x._tref.active)
-        self.assertFalse(x._ctref.active)
-        self.assertTrue(x._tref.called)
-        self.assertFalse(x._ctref.called)
-
-    def test_cleanup(self):
-        x = Polaroid(self.state, app=self.app)
-        cleanup_signal_sent = [False]
-
-        def handler(**kwargs):
-            cleanup_signal_sent[0] = True
-
-        x.cleanup_signal.connect(handler)
-        x.cleanup()
-        self.assertTrue(cleanup_signal_sent[0])
-
-    def test_shutter__capture(self):
-        x = Polaroid(self.state, app=self.app)
-        shutter_signal_sent = [False]
-
-        def handler(**kwargs):
-            shutter_signal_sent[0] = True
-
-        x.shutter_signal.connect(handler)
-        x.shutter()
-        self.assertTrue(shutter_signal_sent[0])
-
-        shutter_signal_sent[0] = False
-        x.capture()
-        self.assertTrue(shutter_signal_sent[0])
-
-    def test_shutter_maxrate(self):
-        x = Polaroid(self.state, app=self.app, maxrate='1/h')
-        shutter_signal_sent = [0]
-
-        def handler(**kwargs):
-            shutter_signal_sent[0] += 1
-
-        x.shutter_signal.connect(handler)
-        for i in range(30):
-            x.shutter()
-            x.shutter()
-            x.shutter()
-        self.assertEqual(shutter_signal_sent[0], 1)
-
-
-class test_evcam(AppCase):
-
-    class MockReceiver(object):
-        raise_keyboard_interrupt = False
-
-        def capture(self, **kwargs):
-            if self.__class__.raise_keyboard_interrupt:
-                raise KeyboardInterrupt()
-
-    class MockEvents(Events):
-
-        def Receiver(self, *args, **kwargs):
-            return test_evcam.MockReceiver()
-
-    def setup(self):
-        self.app.events = self.MockEvents()
-        self.app.events.app = self.app
-
-    def test_evcam(self):
-        with restore_logging():
-            evcam(Polaroid, timer=timer, app=self.app)
-            evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app)
-            self.MockReceiver.raise_keyboard_interrupt = True
-            try:
-                with self.assertRaises(SystemExit):
-                    evcam(Polaroid, timer=timer, app=self.app)
-            finally:
-                self.MockReceiver.raise_keyboard_interrupt = False
-
-    @patch('celery.platforms.create_pidlock')
-    def test_evcam_pidfile(self, create_pidlock):
-        evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app)
-        create_pidlock.assert_called_with('/var/pid')
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py
deleted file mode 100644
index aab54c4..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py
+++ /dev/null
@@ -1,582 +0,0 @@
-from __future__ import absolute_import
-
-import pickle
-
-from decimal import Decimal
-from random import shuffle
-from time import time
-from itertools import count
-
-from celery import states
-from celery.events import Event
-from celery.events.state import (
-    State,
-    Worker,
-    Task,
-    HEARTBEAT_EXPIRE_WINDOW,
-    HEARTBEAT_DRIFT_MAX,
-)
-from celery.five import range
-from celery.utils import uuid
-from celery.tests.case import AppCase, Mock, patch
-
-try:
-    Decimal(2.6)
-except TypeError:  # pragma: no cover
-    # Py2.6: Must first convert float to str
-    _float_to_decimal = str
-else:
-    def _float_to_decimal(f):  # noqa
-        return f
-
-
-class replay(object):
-
-    def __init__(self, state):
-        self.state = state
-        self.rewind()
-        self.setup()
-        self.current_clock = 0
-
-    def setup(self):
-        pass
-
-    def next_event(self):
-        ev = self.events[next(self.position)]
-        ev['local_received'] = ev['timestamp']
-        try:
-            self.current_clock = ev['clock']
-        except KeyError:
-            ev['clock'] = self.current_clock = self.current_clock + 1
-        return ev
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        try:
-            self.state.event(self.next_event())
-        except IndexError:
-            raise StopIteration()
-    next = __next__
-
-    def rewind(self):
-        self.position = count(0)
-        return self
-
-    def play(self):
-        for _ in self:
-            pass
-
-
-class ev_worker_online_offline(replay):
-
-    def setup(self):
-        self.events = [
-            Event('worker-online', hostname='utest1'),
-            Event('worker-offline', hostname='utest1'),
-        ]
-
-
-class ev_worker_heartbeats(replay):
-
-    def setup(self):
-        self.events = [
-            Event('worker-heartbeat', hostname='utest1',
-                  timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2),
-            Event('worker-heartbeat', hostname='utest1'),
-        ]
-
-
-class ev_task_states(replay):
-
-    def setup(self):
-        tid = self.tid = uuid()
-        self.events = [
-            Event('task-received', uuid=tid, name='task1',
-                  args='(2, 2)', kwargs="{'foo': 'bar'}",
-                  retries=0, eta=None, hostname='utest1'),
-            Event('task-started', uuid=tid, hostname='utest1'),
-            Event('task-revoked', uuid=tid, hostname='utest1'),
-            Event('task-retried', uuid=tid, exception="KeyError('bar')",
-                  traceback='line 2 at main', hostname='utest1'),
-            Event('task-failed', uuid=tid, exception="KeyError('foo')",
-                  traceback='line 1 at main', hostname='utest1'),
-            Event('task-succeeded', uuid=tid, result='4',
-                  runtime=0.1234, hostname='utest1'),
-        ]
-
-
-def QTEV(type, uuid, hostname, clock, name=None, timestamp=None):
-    """Quick task event."""
-    return Event('task-{0}'.format(type), uuid=uuid, hostname=hostname,
-                 clock=clock, name=name, timestamp=timestamp or time())
-
-
-class ev_logical_clock_ordering(replay):
-
-    def __init__(self, state, offset=0, uids=None):
-        self.offset = offset or 0
-        self.uids = self.setuids(uids)
-        super(ev_logical_clock_ordering, self).__init__(state)
-
-    def setuids(self, uids):
-        uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()]
-        return uids
-
-    def setup(self):
-        offset = self.offset
-        tA, tB, tC = self.uids
-        self.events = [
-            QTEV('received', tA, 'w1', name='tA', clock=offset + 1),
-            QTEV('received', tB, 'w2', name='tB', clock=offset + 1),
-            QTEV('started', tA, 'w1', name='tA', clock=offset + 3),
-            QTEV('received', tC, 'w2', name='tC', clock=offset + 3),
-            QTEV('started', tB, 'w2', name='tB', clock=offset + 5),
-            QTEV('retried', tA, 'w1', name='tA', clock=offset + 7),
-            QTEV('succeeded', tB, 'w2', name='tB', clock=offset + 9),
-            QTEV('started', tC, 'w2', name='tC', clock=offset + 10),
-            QTEV('received', tA, 'w3', name='tA', clock=offset + 13),
-            QTEV('succeded', tC, 'w2', name='tC', clock=offset + 12),
-            QTEV('started', tA, 'w3', name='tA', clock=offset + 14),
-            QTEV('succeeded', tA, 'w3', name='TA', clock=offset + 16),
-        ]
-
-    def rewind_with_offset(self, offset, uids=None):
-        self.offset = offset
-        self.uids = self.setuids(uids or self.uids)
-        self.setup()
-        self.rewind()
-
-
-class ev_snapshot(replay):
-
-    def setup(self):
-        self.events = [
-            Event('worker-online', hostname='utest1'),
-            Event('worker-online', hostname='utest2'),
-            Event('worker-online', hostname='utest3'),
-        ]
-        for i in range(20):
-            worker = not i % 2 and 'utest2' or 'utest1'
-            type = not i % 2 and 'task2' or 'task1'
-            self.events.append(Event('task-received', name=type,
-                                     uuid=uuid(), hostname=worker))
-
-
-class test_Worker(AppCase):
-
-    def test_equality(self):
-        self.assertEqual(Worker(hostname='foo').hostname, 'foo')
-        self.assertEqual(
-            Worker(hostname='foo'), Worker(hostname='foo'),
-        )
-        self.assertNotEqual(
-            Worker(hostname='foo'), Worker(hostname='bar'),
-        )
-        self.assertEqual(
-            hash(Worker(hostname='foo')), hash(Worker(hostname='foo')),
-        )
-        self.assertNotEqual(
-            hash(Worker(hostname='foo')), hash(Worker(hostname='bar')),
-        )
-
-    def test_compatible_with_Decimal(self):
-        w = Worker('george@vandelay.com')
-        timestamp, local_received = Decimal(_float_to_decimal(time())), time()
-        w.event('worker-online', timestamp, local_received, fields={
-            'hostname': 'george@vandelay.com',
-            'timestamp': timestamp,
-            'local_received': local_received,
-            'freq': Decimal(_float_to_decimal(5.6335431)),
-        })
-        self.assertTrue(w.alive)
-
-    def test_survives_missing_timestamp(self):
-        worker = Worker(hostname='foo')
-        worker.event('heartbeat')
-        self.assertEqual(worker.heartbeats, [])
-
-    def test_repr(self):
-        self.assertTrue(repr(Worker(hostname='foo')))
-
-    def test_drift_warning(self):
-        worker = Worker(hostname='foo')
-        with patch('celery.events.state.warn') as warn:
-            worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time())
-            self.assertTrue(warn.called)
-            self.assertIn('Substantial drift', warn.call_args[0][0])
-
-    def test_updates_heartbeat(self):
-        worker = Worker(hostname='foo')
-        worker.event(None, time(), time())
-        self.assertEqual(len(worker.heartbeats), 1)
-        h1 = worker.heartbeats[0]
-        worker.event(None, time(), time() - 10)
-        self.assertEqual(len(worker.heartbeats), 2)
-        self.assertEqual(worker.heartbeats[-1], h1)
-
-
-class test_Task(AppCase):
-
-    def test_equality(self):
-        self.assertEqual(Task(uuid='foo').uuid, 'foo')
-        self.assertEqual(
-            Task(uuid='foo'), Task(uuid='foo'),
-        )
-        self.assertNotEqual(
-            Task(uuid='foo'), Task(uuid='bar'),
-        )
-        self.assertEqual(
-            hash(Task(uuid='foo')), hash(Task(uuid='foo')),
-        )
-        self.assertNotEqual(
-            hash(Task(uuid='foo')), hash(Task(uuid='bar')),
-        )
-
-    def test_info(self):
-        task = Task(uuid='abcdefg',
-                    name='tasks.add',
-                    args='(2, 2)',
-                    kwargs='{}',
-                    retries=2,
-                    result=42,
-                    eta=1,
-                    runtime=0.0001,
-                    expires=1,
-                    foo=None,
-                    exception=1,
-                    received=time() - 10,
-                    started=time() - 8,
-                    exchange='celery',
-                    routing_key='celery',
-                    succeeded=time())
-        self.assertEqual(sorted(list(task._info_fields)),
-                         sorted(task.info().keys()))
-
-        self.assertEqual(sorted(list(task._info_fields + ('received', ))),
-                         sorted(task.info(extra=('received', ))))
-
-        self.assertEqual(sorted(['args', 'kwargs']),
-                         sorted(task.info(['args', 'kwargs']).keys()))
-        self.assertFalse(list(task.info('foo')))
-
-    def test_ready(self):
-        task = Task(uuid='abcdefg',
-                    name='tasks.add')
-        task.event('received', time(), time())
-        self.assertFalse(task.ready)
-        task.event('succeeded', time(), time())
-        self.assertTrue(task.ready)
-
-    def test_sent(self):
-        task = Task(uuid='abcdefg',
-                    name='tasks.add')
-        task.event('sent', time(), time())
-        self.assertEqual(task.state, states.PENDING)
-
-    def test_merge(self):
-        task = Task()
-        task.event('failed', time(), time())
-        task.event('started', time(), time())
-        task.event('received', time(), time(), {
-            'name': 'tasks.add', 'args': (2, 2),
-        })
-        self.assertEqual(task.state, states.FAILURE)
-        self.assertEqual(task.name, 'tasks.add')
-        self.assertTupleEqual(task.args, (2, 2))
-        task.event('retried', time(), time())
-        self.assertEqual(task.state, states.RETRY)
-
-    def test_repr(self):
-        self.assertTrue(repr(Task(uuid='xxx', name='tasks.add')))
-
-
-class test_State(AppCase):
-
-    def test_repr(self):
-        self.assertTrue(repr(State()))
-
-    def test_pickleable(self):
-        self.assertTrue(pickle.loads(pickle.dumps(State())))
-
-    def test_task_logical_clock_ordering(self):
-        state = State()
-        r = ev_logical_clock_ordering(state)
-        tA, tB, tC = r.uids
-        r.play()
-        now = list(state.tasks_by_time())
-        self.assertEqual(now[0][0], tA)
-        self.assertEqual(now[1][0], tC)
-        self.assertEqual(now[2][0], tB)
-        for _ in range(1000):
-            shuffle(r.uids)
-            tA, tB, tC = r.uids
-            r.rewind_with_offset(r.current_clock + 1, r.uids)
-            r.play()
-        now = list(state.tasks_by_time())
-        self.assertEqual(now[0][0], tA)
-        self.assertEqual(now[1][0], tC)
-        self.assertEqual(now[2][0], tB)
-
-    def test_worker_online_offline(self):
-        r = ev_worker_online_offline(State())
-        next(r)
-        self.assertTrue(r.state.alive_workers())
-        self.assertTrue(r.state.workers['utest1'].alive)
-        r.play()
-        self.assertFalse(r.state.alive_workers())
-        self.assertFalse(r.state.workers['utest1'].alive)
-
-    def test_itertasks(self):
-        s = State()
-        s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
-        self.assertEqual(len(list(s.itertasks(limit=2))), 2)
-
-    def test_worker_heartbeat_expire(self):
-        r = ev_worker_heartbeats(State())
-        next(r)
-        self.assertFalse(r.state.alive_workers())
-        self.assertFalse(r.state.workers['utest1'].alive)
-        r.play()
-        self.assertTrue(r.state.alive_workers())
-        self.assertTrue(r.state.workers['utest1'].alive)
-
-    def test_task_states(self):
-        r = ev_task_states(State())
-
-        # RECEIVED
-        next(r)
-        self.assertTrue(r.tid in r.state.tasks)
-        task = r.state.tasks[r.tid]
-        self.assertEqual(task.state, states.RECEIVED)
-        self.assertTrue(task.received)
-        self.assertEqual(task.timestamp, task.received)
-        self.assertEqual(task.worker.hostname, 'utest1')
-
-        # STARTED
-        next(r)
-        self.assertTrue(r.state.workers['utest1'].alive,
-                        'any task event adds worker heartbeat')
-        self.assertEqual(task.state, states.STARTED)
-        self.assertTrue(task.started)
-        self.assertEqual(task.timestamp, task.started)
-        self.assertEqual(task.worker.hostname, 'utest1')
-
-        # REVOKED
-        next(r)
-        self.assertEqual(task.state, states.REVOKED)
-        self.assertTrue(task.revoked)
-        self.assertEqual(task.timestamp, task.revoked)
-        self.assertEqual(task.worker.hostname, 'utest1')
-
-        # RETRY
-        next(r)
-        self.assertEqual(task.state, states.RETRY)
-        self.assertTrue(task.retried)
-        self.assertEqual(task.timestamp, task.retried)
-        self.assertEqual(task.worker.hostname, 'utest1')
-        self.assertEqual(task.exception, "KeyError('bar')")
-        self.assertEqual(task.traceback, 'line 2 at main')
-
-        # FAILURE
-        next(r)
-        self.assertEqual(task.state, states.FAILURE)
-        self.assertTrue(task.failed)
-        self.assertEqual(task.timestamp, task.failed)
-        self.assertEqual(task.worker.hostname, 'utest1')
-        self.assertEqual(task.exception, "KeyError('foo')")
-        self.assertEqual(task.traceback, 'line 1 at main')
-
-        # SUCCESS
-        next(r)
-        self.assertEqual(task.state, states.SUCCESS)
-        self.assertTrue(task.succeeded)
-        self.assertEqual(task.timestamp, task.succeeded)
-        self.assertEqual(task.worker.hostname, 'utest1')
-        self.assertEqual(task.result, '4')
-        self.assertEqual(task.runtime, 0.1234)
-
-    def assertStateEmpty(self, state):
-        self.assertFalse(state.tasks)
-        self.assertFalse(state.workers)
-        self.assertFalse(state.event_count)
-        self.assertFalse(state.task_count)
-
-    def assertState(self, state):
-        self.assertTrue(state.tasks)
-        self.assertTrue(state.workers)
-        self.assertTrue(state.event_count)
-        self.assertTrue(state.task_count)
-
-    def test_freeze_while(self):
-        s = State()
-        r = ev_snapshot(s)
-        r.play()
-
-        def work():
-            pass
-
-        s.freeze_while(work, clear_after=True)
-        self.assertFalse(s.event_count)
-
-        s2 = State()
-        r = ev_snapshot(s2)
-        r.play()
-        s2.freeze_while(work, clear_after=False)
-        self.assertTrue(s2.event_count)
-
-    def test_clear_tasks(self):
-        s = State()
-        r = ev_snapshot(s)
-        r.play()
-        self.assertTrue(s.tasks)
-        s.clear_tasks(ready=False)
-        self.assertFalse(s.tasks)
-
-    def test_clear(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertTrue(r.state.event_count)
-        self.assertTrue(r.state.workers)
-        self.assertTrue(r.state.tasks)
-        self.assertTrue(r.state.task_count)
-
-        r.state.clear()
-        self.assertFalse(r.state.event_count)
-        self.assertFalse(r.state.workers)
-        self.assertTrue(r.state.tasks)
-        self.assertFalse(r.state.task_count)
-
-        r.state.clear(False)
-        self.assertFalse(r.state.tasks)
-
-    def test_task_types(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertEqual(sorted(r.state.task_types()), ['task1', 'task2'])
-
-    def test_tasks_by_timestamp(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertEqual(len(list(r.state.tasks_by_timestamp())), 20)
-
-    def test_tasks_by_type(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertEqual(len(list(r.state.tasks_by_type('task1'))), 10)
-        self.assertEqual(len(list(r.state.tasks_by_type('task2'))), 10)
-
-    def test_alive_workers(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertEqual(len(r.state.alive_workers()), 3)
-
-    def test_tasks_by_worker(self):
-        r = ev_snapshot(State())
-        r.play()
-        self.assertEqual(len(list(r.state.tasks_by_worker('utest1'))), 10)
-        self.assertEqual(len(list(r.state.tasks_by_worker('utest2'))), 10)
-
-    def test_survives_unknown_worker_event(self):
-        s = State()
-        s.event({
-            'type': 'worker-unknown-event-xxx',
-            'foo': 'bar',
-        })
-        s.event({
-            'type': 'worker-unknown-event-xxx',
-            'hostname': 'xxx',
-            'foo': 'bar',
-        })
-
-    def test_survives_unknown_worker_leaving(self):
-        s = State(on_node_leave=Mock(name='on_node_leave'))
-        (worker, created), subject = s.event({
-            'type': 'worker-offline',
-            'hostname': 'unknown@vandelay.com',
-            'timestamp': time(),
-            'local_received': time(),
-            'clock': 301030134894833,
-        })
-        self.assertEqual(worker, Worker('unknown@vandelay.com'))
-        self.assertFalse(created)
-        self.assertEqual(subject, 'offline')
-        self.assertNotIn('unknown@vandelay.com', s.workers)
-        s.on_node_leave.assert_called_with(worker)
-
-    def test_on_node_join_callback(self):
-        s = State(on_node_join=Mock(name='on_node_join'))
-        (worker, created), subject = s.event({
-            'type': 'worker-online',
-            'hostname': 'george@vandelay.com',
-            'timestamp': time(),
-            'local_received': time(),
-            'clock': 34314,
-        })
-        self.assertTrue(worker)
-        self.assertTrue(created)
-        self.assertEqual(subject, 'online')
-        self.assertIn('george@vandelay.com', s.workers)
-        s.on_node_join.assert_called_with(worker)
-
-    def test_survives_unknown_task_event(self):
-        s = State()
-        s.event(
-            {
-                'type': 'task-unknown-event-xxx',
-                'foo': 'bar',
-                'uuid': 'x',
-                'hostname': 'y',
-                'timestamp': time(),
-                'local_received': time(),
-                'clock': 0,
-            },
-        )
-
-    def test_limits_maxtasks(self):
-        s = State(max_tasks_in_memory=1)
-        s.heap_multiplier = 2
-        s.event({
-            'type': 'task-unknown-event-xxx',
-            'foo': 'bar',
-            'uuid': 'x',
-            'hostname': 'y',
-            'clock': 3,
-            'timestamp': time(),
-            'local_received': time(),
-        })
-        s.event({
-            'type': 'task-unknown-event-xxx',
-            'foo': 'bar',
-            'uuid': 'y',
-            'hostname': 'y',
-            'clock': 4,
-            'timestamp': time(),
-            'local_received': time(),
-        })
-        s.event({
-            'type': 'task-unknown-event-xxx',
-            'foo': 'bar',
-            'uuid': 'z',
-            'hostname': 'y',
-            'clock': 5,
-            'timestamp': time(),
-            'local_received': time(),
-        })
-        self.assertEqual(len(s._taskheap), 2)
-        self.assertEqual(s._taskheap[0].clock, 4)
-        self.assertEqual(s._taskheap[1].clock, 5)
-
-        s._taskheap.append(s._taskheap[0])
-        self.assertTrue(list(s.tasks_by_time()))
-
-    def test_callback(self):
-        scratch = {}
-
-        def callback(state, event):
-            scratch['recv'] = True
-
-        s = State(callback=callback)
-        s.event({'type': 'worker-online'})
-        self.assertTrue(scratch.get('recv'))
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py
deleted file mode 100644
index 94b755e..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py
+++ /dev/null
@@ -1,301 +0,0 @@
-from __future__ import absolute_import
-
-import os
-
-from contextlib import contextmanager
-
-from celery.fixups.django import (
-    _maybe_close_fd,
-    fixup,
-    DjangoFixup,
-    DjangoWorkerFixup,
-)
-
-from celery.tests.case import (
-    AppCase, Mock, patch, patch_many, patch_modules, mask_modules,
-)
-
-
-class FixupCase(AppCase):
-    Fixup = None
-
-    @contextmanager
-    def fixup_context(self, app):
-        with patch('celery.fixups.django.DjangoWorkerFixup.validate_models'):
-            with patch('celery.fixups.django.symbol_by_name') as symbyname:
-                with patch('celery.fixups.django.import_module') as impmod:
-                    f = self.Fixup(app)
-                    yield f, impmod, symbyname
-
-
-class test_DjangoFixup(FixupCase):
-    Fixup = DjangoFixup
-
-    def test_fixup(self):
-        with patch('celery.fixups.django.DjangoFixup') as Fixup:
-            with patch.dict(os.environ, DJANGO_SETTINGS_MODULE=''):
-                fixup(self.app)
-                self.assertFalse(Fixup.called)
-            with patch.dict(os.environ, DJANGO_SETTINGS_MODULE='settings'):
-                with mask_modules('django'):
-                    with self.assertWarnsRegex(UserWarning, 'but Django is'):
-                        fixup(self.app)
-                        self.assertFalse(Fixup.called)
-                with patch_modules('django'):
-                    fixup(self.app)
-                    self.assertTrue(Fixup.called)
-
-    def test_maybe_close_fd(self):
-        with patch('os.close'):
-            _maybe_close_fd(Mock())
-            _maybe_close_fd(object())
-
-    def test_init(self):
-        with self.fixup_context(self.app) as (f, importmod, sym):
-            self.assertTrue(f)
-
-            def se(name):
-                if name == 'django.utils.timezone:now':
-                    raise ImportError()
-                return Mock()
-            sym.side_effect = se
-            self.assertTrue(self.Fixup(self.app)._now)
-
-    def test_install(self):
-        self.app.loader = Mock()
-        with self.fixup_context(self.app) as (f, _, _):
-            with patch_many('os.getcwd', 'sys.path',
-                            'celery.fixups.django.signals') as (cw, p, sigs):
-                cw.return_value = '/opt/vandelay'
-                f.install()
-                sigs.worker_init.connect.assert_called_with(f.on_worker_init)
-                self.assertEqual(self.app.loader.now, f.now)
-                self.assertEqual(self.app.loader.mail_admins, f.mail_admins)
-                p.append.assert_called_with('/opt/vandelay')
-
-    def test_now(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            self.assertTrue(f.now(utc=True))
-            self.assertFalse(f._now.called)
-            self.assertTrue(f.now(utc=False))
-            self.assertTrue(f._now.called)
-
-    def test_mail_admins(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            f.mail_admins('sub', 'body', True)
-            f._mail_admins.assert_called_with(
-                'sub', 'body', fail_silently=True,
-            )
-
-    def test_on_worker_init(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            with patch('celery.fixups.django.DjangoWorkerFixup') as DWF:
-                f.on_worker_init()
-                DWF.assert_called_with(f.app)
-                DWF.return_value.install.assert_called_with()
-                self.assertIs(f._worker_fixup, DWF.return_value)
-
-
-class test_DjangoWorkerFixup(FixupCase):
-    Fixup = DjangoWorkerFixup
-
-    def test_init(self):
-        with self.fixup_context(self.app) as (f, importmod, sym):
-            self.assertTrue(f)
-
-            def se(name):
-                if name == 'django.db:close_old_connections':
-                    raise ImportError()
-                return Mock()
-            sym.side_effect = se
-            self.assertIsNone(self.Fixup(self.app)._close_old_connections)
-
-    def test_install(self):
-        self.app.conf = {'CELERY_DB_REUSE_MAX': None}
-        self.app.loader = Mock()
-        with self.fixup_context(self.app) as (f, _, _):
-            with patch_many('celery.fixups.django.signals') as (sigs, ):
-                f.install()
-                sigs.beat_embedded_init.connect.assert_called_with(
-                    f.close_database,
-                )
-                sigs.worker_ready.connect.assert_called_with(f.on_worker_ready)
-                sigs.task_prerun.connect.assert_called_with(f.on_task_prerun)
-                sigs.task_postrun.connect.assert_called_with(f.on_task_postrun)
-                sigs.worker_process_init.connect.assert_called_with(
-                    f.on_worker_process_init,
-                )
-
-    def test_on_worker_process_init(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            with patch('celery.fixups.django._maybe_close_fd') as mcf:
-                _all = f._db.connections.all = Mock()
-                conns = _all.return_value = [
-                    Mock(), Mock(),
-                ]
-                conns[0].connection = None
-                with patch.object(f, 'close_cache'):
-                    with patch.object(f, '_close_database'):
-                        f.on_worker_process_init()
-                        mcf.assert_called_with(conns[1].connection)
-                        f.close_cache.assert_called_with()
-                        f._close_database.assert_called_with()
-
-                        mcf.reset_mock()
-                        _all.side_effect = AttributeError()
-                        f.on_worker_process_init()
-                        mcf.assert_called_with(f._db.connection.connection)
-                        f._db.connection = None
-                        f.on_worker_process_init()
-
-    def test_on_task_prerun(self):
-        task = Mock()
-        with self.fixup_context(self.app) as (f, _, _):
-            task.request.is_eager = False
-            with patch.object(f, 'close_database'):
-                f.on_task_prerun(task)
-                f.close_database.assert_called_with()
-
-            task.request.is_eager = True
-            with patch.object(f, 'close_database'):
-                f.on_task_prerun(task)
-                self.assertFalse(f.close_database.called)
-
-    def test_on_task_postrun(self):
-        task = Mock()
-        with self.fixup_context(self.app) as (f, _, _):
-            with patch.object(f, 'close_cache'):
-                task.request.is_eager = False
-                with patch.object(f, 'close_database'):
-                    f.on_task_postrun(task)
-                    self.assertTrue(f.close_database.called)
-                    self.assertTrue(f.close_cache.called)
-
-            # when a task is eager, do not close connections
-            with patch.object(f, 'close_cache'):
-                task.request.is_eager = True
-                with patch.object(f, 'close_database'):
-                    f.on_task_postrun(task)
-                    self.assertFalse(f.close_database.called)
-                    self.assertFalse(f.close_cache.called)
-
-    def test_close_database(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            f._close_old_connections = Mock()
-            f.close_database()
-            f._close_old_connections.assert_called_with()
-            f._close_old_connections = None
-            with patch.object(f, '_close_database') as _close:
-                f.db_reuse_max = None
-                f.close_database()
-                _close.assert_called_with()
-                _close.reset_mock()
-
-                f.db_reuse_max = 10
-                f._db_recycles = 3
-                f.close_database()
-                self.assertFalse(_close.called)
-                self.assertEqual(f._db_recycles, 4)
-                _close.reset_mock()
-
-                f._db_recycles = 20
-                f.close_database()
-                _close.assert_called_with()
-                self.assertEqual(f._db_recycles, 1)
-
-    def test__close_database(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            conns = [Mock(), Mock(), Mock()]
-            conns[1].close.side_effect = KeyError('already closed')
-            f.database_errors = (KeyError, )
-
-            f._db.connections = Mock()  # ConnectionHandler
-            f._db.connections.all.side_effect = lambda: conns
-
-            f._close_database()
-            conns[0].close.assert_called_with()
-            conns[1].close.assert_called_with()
-            conns[2].close.assert_called_with()
-
-            conns[1].close.side_effect = KeyError('omg')
-            with self.assertRaises(KeyError):
-                f._close_database()
-
-            class Object(object):
-                pass
-            o = Object()
-            o.close_connection = Mock()
-            f._db = o
-            f._close_database()
-            o.close_connection.assert_called_with()
-
-    def test_close_cache(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            f.close_cache()
-            f._cache.cache.close.assert_called_with()
-            f._cache.cache.close.side_effect = TypeError()
-            f.close_cache()
-
-    def test_on_worker_ready(self):
-        with self.fixup_context(self.app) as (f, _, _):
-            f._settings.DEBUG = False
-            f.on_worker_ready()
-            with self.assertWarnsRegex(UserWarning, r'leads to a memory leak'):
-                f._settings.DEBUG = True
-                f.on_worker_ready()
-
-    def test_mysql_errors(self):
-        with patch_modules('MySQLdb'):
-            import MySQLdb as mod
-            mod.DatabaseError = Mock()
-            mod.InterfaceError = Mock()
-            mod.OperationalError = Mock()
-            with self.fixup_context(self.app) as (f, _, _):
-                self.assertIn(mod.DatabaseError, f.database_errors)
-                self.assertIn(mod.InterfaceError, f.database_errors)
-                self.assertIn(mod.OperationalError, f.database_errors)
-        with mask_modules('MySQLdb'):
-            with self.fixup_context(self.app):
-                pass
-
-    def test_pg_errors(self):
-        with patch_modules('psycopg2'):
-            import psycopg2 as mod
-            mod.DatabaseError = Mock()
-            mod.InterfaceError = Mock()
-            mod.OperationalError = Mock()
-            with self.fixup_context(self.app) as (f, _, _):
-                self.assertIn(mod.DatabaseError, f.database_errors)
-                self.assertIn(mod.InterfaceError, f.database_errors)
-                self.assertIn(mod.OperationalError, f.database_errors)
-        with mask_modules('psycopg2'):
-            with self.fixup_context(self.app):
-                pass
-
-    def test_sqlite_errors(self):
-        with patch_modules('sqlite3'):
-            import sqlite3 as mod
-            mod.DatabaseError = Mock()
-            mod.InterfaceError = Mock()
-            mod.OperationalError = Mock()
-            with self.fixup_context(self.app) as (f, _, _):
-                self.assertIn(mod.DatabaseError, f.database_errors)
-                self.assertIn(mod.InterfaceError, f.database_errors)
-                self.assertIn(mod.OperationalError, f.database_errors)
-        with mask_modules('sqlite3'):
-            with self.fixup_context(self.app):
-                pass
-
-    def test_oracle_errors(self):
-        with patch_modules('cx_Oracle'):
-            import cx_Oracle as mod
-            mod.DatabaseError = Mock()
-            mod.InterfaceError = Mock()
-            mod.OperationalError = Mock()
-            with self.fixup_context(self.app) as (f, _, _):
-                self.assertIn(mod.DatabaseError, f.database_errors)
-                self.assertIn(mod.InterfaceError, f.database_errors)
-                self.assertIn(mod.OperationalError, f.database_errors)
-        with mask_modules('cx_Oracle'):
-            with self.fixup_context(self.app):
-                pass
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py
deleted file mode 100644
index 298c684..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from __future__ import absolute_import
-
-import atexit
-import logging
-import os
-import signal
-import socket
-import sys
-import traceback
-
-from itertools import count
-from time import time
-
-from celery import current_app
-from celery.exceptions import TimeoutError
-from celery.app.control import flatten_reply
-from celery.utils.imports import qualname
-
-from celery.tests.case import Case
-
-HOSTNAME = socket.gethostname()
-
-
-def say(msg):
-    sys.stderr.write('%s\n' % msg)
-
-
-def try_while(fun, reason='Timed out', timeout=10, interval=0.5):
-    time_start = time()
-    for iterations in count(0):
-        if time() - time_start >= timeout:
-            raise TimeoutError()
-        ret = fun()
-        if ret:
-            return ret
-
-
-class Worker(object):
-    started = False
-    worker_ids = count(1)
-    _shutdown_called = False
-
-    def __init__(self, hostname, loglevel='error', app=None):
-        self.hostname = hostname
-        self.loglevel = loglevel
-        self.app = app or current_app._get_current_object()
-
-    def start(self):
-        if not self.started:
-            self._fork_and_exec()
-            self.started = True
-
-    def _fork_and_exec(self):
-        pid = os.fork()
-        if pid == 0:
-            self.app.worker_main(['worker', '--loglevel=INFO',
-                                  '-n', self.hostname,
-                                  '-P', 'solo'])
-            os._exit(0)
-        self.pid = pid
-
-    def ping(self, *args, **kwargs):
-        return self.app.control.ping(*args, **kwargs)
-
-    def is_alive(self, timeout=1):
-        r = self.ping(destination=[self.hostname], timeout=timeout)
-        return self.hostname in flatten_reply(r)
-
-    def wait_until_started(self, timeout=10, interval=0.5):
-        try_while(
-            lambda: self.is_alive(interval),
-            "Worker won't start (after %s secs.)" % timeout,
-            interval=interval, timeout=timeout,
-        )
-        say('--WORKER %s IS ONLINE--' % self.hostname)
-
-    def ensure_shutdown(self, timeout=10, interval=0.5):
-        os.kill(self.pid, signal.SIGTERM)
-        try_while(
-            lambda: not self.is_alive(interval),
-            "Worker won't shutdown (after %s secs.)" % timeout,
-            timeout=10, interval=0.5,
-        )
-        say('--WORKER %s IS SHUTDOWN--' % self.hostname)
-        self._shutdown_called = True
-
-    def ensure_started(self):
-        self.start()
self.start() - self.wait_until_started() - - @classmethod - def managed(cls, hostname=None, caller=None): - hostname = hostname or socket.gethostname() - if caller: - hostname = '.'.join([qualname(caller), hostname]) - else: - hostname += str(next(cls.worker_ids)) - worker = cls(hostname) - worker.ensure_started() - stack = traceback.format_stack() - - @atexit.register - def _ensure_shutdown_once(): - if not worker._shutdown_called: - say('-- Found worker not stopped at shutdown: %s\n%s' % ( - worker.hostname, - '\n'.join(stack))) - worker.ensure_shutdown() - - return worker - - -class WorkerCase(Case): - hostname = HOSTNAME - worker = None - - @classmethod - def setUpClass(cls): - logging.getLogger('amqp').setLevel(logging.ERROR) - cls.worker = Worker.managed(cls.hostname, caller=cls) - - @classmethod - def tearDownClass(cls): - cls.worker.ensure_shutdown() - - def assertWorkerAlive(self, timeout=1): - self.assertTrue(self.worker.is_alive(timeout)) - - def inspect(self, timeout=1): - return self.app.control.inspect([self.worker.hostname], - timeout=timeout) - - def my_response(self, response): - return flatten_reply(response)[self.worker.hostname] - - def is_accepted(self, task_id, interval=0.5): - active = self.inspect(timeout=interval).active() - if active: - for task in active[self.worker.hostname]: - if task['id'] == task_id: - return True - return False - - def is_reserved(self, task_id, interval=0.5): - reserved = self.inspect(timeout=interval).reserved() - if reserved: - for task in reserved[self.worker.hostname]: - if task['id'] == task_id: - return True - return False - - def is_scheduled(self, task_id, interval=0.5): - schedule = self.inspect(timeout=interval).scheduled() - if schedule: - for item in schedule[self.worker.hostname]: - if item['request']['id'] == task_id: - return True - return False - - def is_received(self, task_id, interval=0.5): - return (self.is_reserved(task_id, interval) or - self.is_scheduled(task_id, interval) or - self.is_accepted(task_id, interval)) - - def ensure_accepted(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_accepted(task_id, interval), - 'Task not accepted within timeout', - interval=interval, timeout=timeout) - - def ensure_received(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_received(task_id, interval), - 'Task not received within timeout', - interval=interval, timeout=timeout) - - def ensure_scheduled(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_scheduled(task_id, interval), - 'Task not scheduled within timeout', - interval=interval, timeout=timeout) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py deleted file mode 100644 index 85479b4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import absolute_import - -import time - -from celery import task, signature - - -@task() -def add(x, y): - return x + y - - -@task() -def add_cb(x, y, callback=None): - result = x + y - if callback: - return signature(callback).apply_async((result, )) - return result - - -@task() -def sleeptask(i): - time.sleep(i) - return i diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py deleted file mode 100644 index 50b7f4c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py +++
/dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import -""" -Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) - -Generated with `extra/security/get-cert.sh` - -""" -KEY1 = """-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 -dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp -vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB -AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX -0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf -6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM -s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt -XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 -PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu -fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp -UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv -BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 -xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR ------END RSA PRIVATE KEY-----""" - -KEY2 = """-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C -fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 -rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB -AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U -JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn -6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k -Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 -qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE -AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 -yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh -XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz -i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 -Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V ------END RSA PRIVATE KEY-----""" - -CERT1 = """-----BEGIN CERTIFICATE----- -MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV -UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN -BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 -Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT -AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP -MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl -Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ -CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 -//IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG -vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J -94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA -yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK -aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK ------END CERTIFICATE-----""" - -CERT2 = """-----BEGIN CERTIFICATE----- -MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti -/G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ -EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm -IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF 
-AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP -e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS -WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== ------END CERTIFICATE-----""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py deleted file mode 100644 index ba421a9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import - -from celery.tests.case import AppCase, SkipTest - -import sys - - -class SecurityCase(AppCase): - - def setup(self): - if sys.version_info[0] == 3: - raise SkipTest('PyOpenSSL does not work on Python 3') - try: - from OpenSSL import crypto # noqa - except ImportError: - raise SkipTest('OpenSSL.crypto not installed') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py deleted file mode 100644 index 6e153bd..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import absolute_import - -from celery.exceptions import SecurityError -from celery.security.certificate import Certificate, CertStore, FSCertStore - -from . import CERT1, CERT2, KEY1 -from .case import SecurityCase - -from celery.tests.case import Mock, SkipTest, mock_open, patch - - -class test_Certificate(SecurityCase): - - def test_valid_certificate(self): - Certificate(CERT1) - Certificate(CERT2) - - def test_invalid_certificate(self): - self.assertRaises((SecurityError, TypeError), Certificate, None) - self.assertRaises(SecurityError, Certificate, '') - self.assertRaises(SecurityError, Certificate, 'foo') - self.assertRaises(SecurityError, Certificate, CERT1[:20] + CERT1[21:]) - self.assertRaises(SecurityError, Certificate, KEY1) - - def test_has_expired(self): - raise SkipTest('cert actually expired') - self.assertFalse(Certificate(CERT1).has_expired()) - - -class test_CertStore(SecurityCase): - - def test_itercerts(self): - cert1 = Certificate(CERT1) - cert2 = Certificate(CERT2) - certstore = CertStore() - for c in certstore.itercerts(): - self.assertTrue(False) - certstore.add_cert(cert1) - certstore.add_cert(cert2) - for c in certstore.itercerts(): - self.assertIn(c, (cert1, cert2)) - - def test_duplicate(self): - cert1 = Certificate(CERT1) - certstore = CertStore() - certstore.add_cert(cert1) - self.assertRaises(SecurityError, certstore.add_cert, cert1) - - -class test_FSCertStore(SecurityCase): - - @patch('os.path.isdir') - @patch('glob.glob') - @patch('celery.security.certificate.Certificate') - def test_init(self, Certificate, glob, isdir): - cert = Certificate.return_value = Mock() - cert.has_expired.return_value = False - isdir.return_value = True - glob.return_value = ['foo.cert'] - with mock_open(): - cert.get_id.return_value = 1 - x = FSCertStore('/var/certs') - self.assertIn(1, x._certs) - glob.assert_called_with('/var/certs/*') - - # they both end up with the same id - glob.return_value = ['foo.cert', 'bar.cert'] - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') - glob.return_value = ['foo.cert'] - - cert.has_expired.return_value = True - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') - - isdir.return_value = False - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py deleted file mode 100644 index d8551b2..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import absolute_import - -from celery.exceptions import SecurityError -from celery.security.key import PrivateKey - -from . import CERT1, KEY1, KEY2 -from .case import SecurityCase - - -class test_PrivateKey(SecurityCase): - - def test_valid_private_key(self): - PrivateKey(KEY1) - PrivateKey(KEY2) - - def test_invalid_private_key(self): - self.assertRaises((SecurityError, TypeError), PrivateKey, None) - self.assertRaises(SecurityError, PrivateKey, '') - self.assertRaises(SecurityError, PrivateKey, 'foo') - self.assertRaises(SecurityError, PrivateKey, KEY1[:20] + KEY1[21:]) - self.assertRaises(SecurityError, PrivateKey, CERT1) - - def test_sign(self): - pkey = PrivateKey(KEY1) - pkey.sign('test', 'sha1') - self.assertRaises(ValueError, pkey.sign, 'test', 'unknown') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py deleted file mode 100644 index 227c65a..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) - -Generated with: - -.. code-block:: bash - - $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 - $ openssl req -new -key key1.key -out key1.csr -passin pass:test - $ cp key1.key key1.key.org - $ openssl rsa -in key1.key.org -out key1.key -passin pass:test - $ openssl x509 -req -days 365 -in key1.csr \ - -signkey key1.key -out cert1.crt - $ rm key1.key.org key1.csr - -""" -from __future__ import absolute_import - -from kombu.serialization import disable_insecure_serializers - -from celery.exceptions import ImproperlyConfigured, SecurityError -from celery.five import builtins -from celery.security.utils import reraise_errors -from kombu.serialization import registry - -from .case import SecurityCase - -from celery.tests.case import Mock, mock_open, patch - - -class test_security(SecurityCase): - - def teardown(self): - registry._disabled_content_types.clear() - - def test_disable_insecure_serializers(self): - try: - disabled = registry._disabled_content_types - self.assertTrue(disabled) - - disable_insecure_serializers( - ['application/json', 'application/x-python-serialize'], - ) - self.assertIn('application/x-yaml', disabled) - self.assertNotIn('application/json', disabled) - self.assertNotIn('application/x-python-serialize', disabled) - disabled.clear() - - disable_insecure_serializers(allowed=None) - self.assertIn('application/x-yaml', disabled) - self.assertIn('application/json', disabled) - self.assertIn('application/x-python-serialize', disabled) - finally: - disable_insecure_serializers(allowed=['json']) - - def test_setup_security(self): - disabled = registry._disabled_content_types - self.assertEqual(0, len(disabled)) - - self.app.conf.CELERY_TASK_SERIALIZER = 'json' - self.app.setup_security() - self.assertIn('application/x-python-serialize', disabled) - disabled.clear() - - @patch('celery.security.register_auth') - @patch('celery.security._disable_insecure_serializers') - def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): - calls = [0] - - def effect(*args): - try:
- m = Mock() - m.read.return_value = 'B' if calls[0] else 'A' - return m - finally: - calls[0] += 1 - - self.app.conf.CELERY_TASK_SERIALIZER = 'auth' - with mock_open(side_effect=effect): - with patch('celery.security.registry') as registry: - store = Mock() - self.app.setup_security(['json'], key, cert, store) - dis.assert_called_with(['json']) - reg.assert_called_with('A', 'B', store, 'sha1', 'json') - registry._set_default_serializer.assert_called_with('auth') - - def test_security_conf(self): - self.app.conf.CELERY_TASK_SERIALIZER = 'auth' - with self.assertRaises(ImproperlyConfigured): - self.app.setup_security() - - _import = builtins.__import__ - - def import_hook(name, *args, **kwargs): - if name == 'OpenSSL': - raise ImportError - return _import(name, *args, **kwargs) - - builtins.__import__ = import_hook - with self.assertRaises(ImproperlyConfigured): - self.app.setup_security() - builtins.__import__ = _import - - def test_reraise_errors(self): - with self.assertRaises(SecurityError): - with reraise_errors(errors=(KeyError, )): - raise KeyError('foo') - with self.assertRaises(KeyError): - with reraise_errors(errors=(ValueError, )): - raise KeyError('bar') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py deleted file mode 100644 index 50bc4bf..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import absolute_import - -import os -import base64 - -from kombu.serialization import registry - -from celery.exceptions import SecurityError -from celery.security.serialization import SecureSerializer, register_auth -from celery.security.certificate import Certificate, CertStore -from celery.security.key import PrivateKey - -from . 
import CERT1, CERT2, KEY1, KEY2 -from .case import SecurityCase - - -class test_SecureSerializer(SecurityCase): - - def _get_s(self, key, cert, certs): - store = CertStore() - for c in certs: - store.add_cert(Certificate(c)) - return SecureSerializer(PrivateKey(key), Certificate(cert), store) - - def test_serialize(self): - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s.deserialize(s.serialize('foo')), 'foo') - - def test_deserialize(self): - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertRaises(SecurityError, s.deserialize, 'bad data') - - def test_unmatched_key_cert(self): - s = self._get_s(KEY1, CERT2, [CERT1, CERT2]) - self.assertRaises(SecurityError, - s.deserialize, s.serialize('foo')) - - def test_unknown_source(self): - s1 = self._get_s(KEY1, CERT1, [CERT2]) - s2 = self._get_s(KEY1, CERT1, []) - self.assertRaises(SecurityError, - s1.deserialize, s1.serialize('foo')) - self.assertRaises(SecurityError, - s2.deserialize, s2.serialize('foo')) - - def test_self_send(self): - s1 = self._get_s(KEY1, CERT1, [CERT1]) - s2 = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') - - def test_separate_ends(self): - s1 = self._get_s(KEY1, CERT1, [CERT2]) - s2 = self._get_s(KEY2, CERT2, [CERT1]) - self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') - - def test_register_auth(self): - register_auth(KEY1, CERT1, '') - self.assertIn('application/data', registry._decoders) - - def test_lots_of_sign(self): - for i in range(1000): - rdata = base64.urlsafe_b64encode(os.urandom(265)) - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s.deserialize(s.serialize(rdata)), rdata) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py deleted file mode 100644 index 2508025..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py +++ /dev/null @@ -1,346 +0,0 @@ -from __future__ import absolute_import - -from celery.canvas import ( - Signature, - chain, - group, - chord, - signature, - xmap, - xstarmap, - chunks, - _maybe_group, - maybe_signature, -) -from celery.result import EagerResult - -from celery.tests.case import AppCase, Mock - -SIG = Signature({'task': 'TASK', - 'args': ('A1', ), - 'kwargs': {'K1': 'V1'}, - 'options': {'task_id': 'TASK_ID'}, - 'subtask_type': ''}) - - -class CanvasCase(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @self.app.task(shared=False) - def mul(x, y): - return x * y - self.mul = mul - - @self.app.task(shared=False) - def div(x, y): - return x / y - self.div = div - - -class test_Signature(CanvasCase): - - def test_getitem_property_class(self): - self.assertTrue(Signature.task) - self.assertTrue(Signature.args) - self.assertTrue(Signature.kwargs) - self.assertTrue(Signature.options) - self.assertTrue(Signature.subtask_type) - - def test_getitem_property(self): - self.assertEqual(SIG.task, 'TASK') - self.assertEqual(SIG.args, ('A1', )) - self.assertEqual(SIG.kwargs, {'K1': 'V1'}) - self.assertEqual(SIG.options, 
{'task_id': 'TASK_ID'}) - self.assertEqual(SIG.subtask_type, '') - - def test_link_on_scalar(self): - x = Signature('TASK', link=Signature('B')) - self.assertTrue(x.options['link']) - x.link(Signature('C')) - self.assertIsInstance(x.options['link'], list) - self.assertIn(Signature('B'), x.options['link']) - self.assertIn(Signature('C'), x.options['link']) - - def test_replace(self): - x = Signature('TASK', ('A'), {}) - self.assertTupleEqual(x.replace(args=('B', )).args, ('B', )) - self.assertDictEqual( - x.replace(kwargs={'FOO': 'BAR'}).kwargs, - {'FOO': 'BAR'}, - ) - self.assertDictEqual( - x.replace(options={'task_id': '123'}).options, - {'task_id': '123'}, - ) - - def test_set(self): - self.assertDictEqual( - Signature('TASK', x=1).set(task_id='2').options, - {'x': 1, 'task_id': '2'}, - ) - - def test_link(self): - x = signature(SIG) - x.link(SIG) - x.link(SIG) - self.assertIn(SIG, x.options['link']) - self.assertEqual(len(x.options['link']), 1) - - def test_link_error(self): - x = signature(SIG) - x.link_error(SIG) - x.link_error(SIG) - self.assertIn(SIG, x.options['link_error']) - self.assertEqual(len(x.options['link_error']), 1) - - def test_flatten_links(self): - tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)] - tasks[0].link(tasks[1]) - tasks[1].link(tasks[2]) - self.assertEqual(tasks[0].flatten_links(), tasks) - - def test_OR(self): - x = self.add.s(2, 2) | self.mul.s(4) - self.assertIsInstance(x, chain) - y = self.add.s(4, 4) | self.div.s(2) - z = x | y - self.assertIsInstance(y, chain) - self.assertIsInstance(z, chain) - self.assertEqual(len(z.tasks), 4) - with self.assertRaises(TypeError): - x | 10 - ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8)) - self.assertIsInstance(ax, chain) - self.assertEqual(len(ax.tasks), 3, 'consolidates chain to chain') - - def test_INVERT(self): - x = self.add.s(2, 2) - x.apply_async = Mock() - x.apply_async.return_value = Mock() - x.apply_async.return_value.get = Mock() - x.apply_async.return_value.get.return_value = 4 - self.assertEqual(~x, 4) - self.assertTrue(x.apply_async.called) - - def test_merge_immutable(self): - x = self.add.si(2, 2, foo=1) - args, kwargs, options = x._merge((4, ), {'bar': 2}, {'task_id': 3}) - self.assertTupleEqual(args, (2, 2)) - self.assertDictEqual(kwargs, {'foo': 1}) - self.assertDictEqual(options, {'task_id': 3}) - - def test_set_immutable(self): - x = self.add.s(2, 2) - self.assertFalse(x.immutable) - x.set(immutable=True) - self.assertTrue(x.immutable) - x.set(immutable=False) - self.assertFalse(x.immutable) - - def test_election(self): - x = self.add.s(2, 2) - x.freeze('foo') - x.type.app.control = Mock() - r = x.election() - self.assertTrue(x.type.app.control.election.called) - self.assertEqual(r.id, 'foo') - - def test_AsyncResult_when_not_registered(self): - s = signature('xxx.not.registered', app=self.app) - self.assertTrue(s.AsyncResult) - - def test_apply_async_when_not_registered(self): - s = signature('xxx.not.registered', app=self.app) - self.assertTrue(s._apply_async) - - -class test_xmap_xstarmap(CanvasCase): - - def test_apply(self): - for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]: - args = [(i, i) for i in range(10)] - s = getattr(self.add, attr)(args) - s.type = Mock() - - s.apply_async(foo=1) - s.type.apply_async.assert_called_with( - (), {'task': self.add.s(), 'it': args}, foo=1, - ) - - self.assertEqual(type.from_dict(dict(s)), s) - self.assertTrue(repr(s)) - - -class test_chunks(CanvasCase): - - def test_chunks(self): - x = self.add.chunks(range(100), 10) - 
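For reference, the canvas primitives these deleted tests exercise compose like this. A minimal sketch, assuming a throwaway Celery app; the app and task names are illustrative, not taken from the diff:

    from celery import Celery, chain, group

    app = Celery('sketch')  # hypothetical app; no broker is needed just to build signatures

    @app.task
    def add(x, y):
        return x + y

    # .s() builds a partial signature; | composes signatures into a chain,
    # which is the behaviour test_OR asserts above.
    c = add.s(2, 2) | add.s(4) | add.s(8)

    # .si() builds an immutable signature that ignores the parent's result,
    # the property checked by test_merge_immutable and test_set_immutable.
    g = group(add.si(i, i) for i in range(3))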
self.assertEqual( - dict(chunks.from_dict(dict(x), app=self.app)), dict(x), - ) - - self.assertTrue(x.group()) - self.assertEqual(len(x.group().tasks), 10) - - x.group = Mock() - gr = x.group.return_value = Mock() - - x.apply_async() - gr.apply_async.assert_called_with((), {}) - - x() - gr.assert_called_with() - - self.app.conf.CELERY_ALWAYS_EAGER = True - chunks.apply_chunks(app=self.app, **x['kwargs']) - - -class test_chain(CanvasCase): - - def test_repr(self): - x = self.add.s(2, 2) | self.add.s(2) - self.assertEqual( - repr(x), '%s(2, 2) | %s(2)' % (self.add.name, self.add.name), - ) - - def test_reverse(self): - x = self.add.s(2, 2) | self.add.s(2) - self.assertIsInstance(signature(x), chain) - self.assertIsInstance(signature(dict(x)), chain) - - def test_always_eager(self): - self.app.conf.CELERY_ALWAYS_EAGER = True - self.assertEqual(~(self.add.s(4, 4) | self.add.s(8)), 16) - - def test_apply(self): - x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10)) - res = x.apply() - self.assertIsInstance(res, EagerResult) - self.assertEqual(res.get(), 26) - - self.assertEqual(res.parent.get(), 16) - self.assertEqual(res.parent.parent.get(), 8) - self.assertIsNone(res.parent.parent.parent) - - def test_empty_chain_returns_none(self): - self.assertIsNone(chain(app=self.app)()) - self.assertIsNone(chain(app=self.app).apply_async()) - - def test_call_no_tasks(self): - x = chain() - self.assertFalse(x()) - - def test_call_with_tasks(self): - x = self.add.s(2, 2) | self.add.s(4) - x.apply_async = Mock() - x(2, 2, foo=1) - x.apply_async.assert_called_with((2, 2), {'foo': 1}) - - def test_from_dict_no_args__with_args(self): - x = dict(self.add.s(2, 2) | self.add.s(4)) - x['args'] = None - self.assertIsInstance(chain.from_dict(x), chain) - x['args'] = (2, ) - self.assertIsInstance(chain.from_dict(x), chain) - - def test_accepts_generator_argument(self): - x = chain(self.add.s(i) for i in range(10)) - self.assertTrue(x.tasks[0].type, self.add) - self.assertTrue(x.type) - - -class test_group(CanvasCase): - - def test_repr(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - self.assertEqual(repr(x), repr(x.tasks)) - - def test_reverse(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - self.assertIsInstance(signature(x), group) - self.assertIsInstance(signature(dict(x)), group) - - def test_maybe_group_sig(self): - self.assertListEqual( - _maybe_group(self.add.s(2, 2)), [self.add.s(2, 2)], - ) - - def test_from_dict(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - x['args'] = (2, 2) - self.assertTrue(group.from_dict(dict(x))) - x['args'] = None - self.assertTrue(group.from_dict(dict(x))) - - def test_call_empty_group(self): - x = group(app=self.app) - self.assertFalse(len(x())) - x.delay() - x.apply_async() - x() - - def test_skew(self): - g = group([self.add.s(i, i) for i in range(10)]) - g.skew(start=1, stop=10, step=1) - for i, task in enumerate(g.tasks): - self.assertEqual(task.options['countdown'], i + 1) - - def test_iter(self): - g = group([self.add.s(i, i) for i in range(10)]) - self.assertListEqual(list(iter(g)), g.tasks) - - -class test_chord(CanvasCase): - - def test_reverse(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - self.assertIsInstance(signature(x), chord) - self.assertIsInstance(signature(dict(x)), chord) - - def test_clone_clones_body(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - y = x.clone() - self.assertIsNot(x.kwargs['body'], y.kwargs['body']) - y.kwargs.pop('body') - z = y.clone() - 
self.assertIsNone(z.kwargs.get('body')) - - def test_links_to_body(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - x.link(self.div.s(2)) - self.assertFalse(x.options.get('link')) - self.assertTrue(x.kwargs['body'].options['link']) - - x.link_error(self.div.s(2)) - self.assertFalse(x.options.get('link_error')) - self.assertTrue(x.kwargs['body'].options['link_error']) - - self.assertTrue(x.tasks) - self.assertTrue(x.body) - - def test_repr(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - self.assertTrue(repr(x)) - x.kwargs['body'] = None - self.assertIn('without body', repr(x)) - - -class test_maybe_signature(CanvasCase): - - def test_is_None(self): - self.assertIsNone(maybe_signature(None, app=self.app)) - - def test_is_dict(self): - self.assertIsInstance( - maybe_signature(dict(self.add.s()), app=self.app), Signature, - ) - - def test_when_sig(self): - s = self.add.s() - self.assertIs(maybe_signature(s, app=self.app), s) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py deleted file mode 100644 index dcc3304..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py +++ /dev/null @@ -1,235 +0,0 @@ -from __future__ import absolute_import - -from contextlib import contextmanager - -from celery import group -from celery import canvas -from celery import result -from celery.exceptions import ChordError, Retry -from celery.five import range -from celery.result import AsyncResult, GroupResult, EagerResult -from celery.tests.case import AppCase, Mock - - -def passthru(x): - return x - - -class ChordCase(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - -class TSR(GroupResult): - is_ready = True - value = None - - def ready(self): - return self.is_ready - - def join(self, propagate=True, **kwargs): - if propagate: - for value in self.value: - if isinstance(value, Exception): - raise value - return self.value - join_native = join - - def _failed_join_report(self): - for value in self.value: - if isinstance(value, Exception): - yield EagerResult('some_id', value, 'FAILURE') - - -class TSRNoReport(TSR): - - def _failed_join_report(self): - return iter([]) - - -@contextmanager -def patch_unlock_retry(app): - unlock = app.tasks['celery.chord_unlock'] - retry = Mock() - retry.return_value = Retry() - prev, unlock.retry = unlock.retry, retry - try: - yield unlock, retry - finally: - unlock.retry = prev - - -class test_unlock_chord_task(ChordCase): - - def test_unlock_ready(self): - - class AlwaysReady(TSR): - is_ready = True - value = [2, 4, 8, 6] - - with self._chord_context(AlwaysReady) as (cb, retry, _): - cb.type.apply_async.assert_called_with( - ([2, 4, 8, 6], ), {}, task_id=cb.id, - ) - # did not retry - self.assertFalse(retry.call_count) - - def test_callback_fails(self): - - class AlwaysReady(TSR): - is_ready = True - value = [2, 4, 8, 6] - - def setup(callback): - callback.apply_async.side_effect = IOError() - - with self._chord_context(AlwaysReady, setup) as (cb, retry, fail): - self.assertTrue(fail.called) - self.assertEqual( - fail.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail.call_args[1]['exc'], ChordError, - ) - - def test_unlock_ready_failed(self): - - class Failed(TSR): - is_ready = True - value = [2, KeyError('foo'), 8, 6] - - with self._chord_context(Failed) as (cb, retry, fail_current): - 
self.assertFalse(cb.type.apply_async.called) - # did not retry - self.assertFalse(retry.call_count) - self.assertTrue(fail_current.called) - self.assertEqual( - fail_current.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail_current.call_args[1]['exc'], ChordError, - ) - self.assertIn('some_id', str(fail_current.call_args[1]['exc'])) - - def test_unlock_ready_failed_no_culprit(self): - class Failed(TSRNoReport): - is_ready = True - value = [2, KeyError('foo'), 8, 6] - - with self._chord_context(Failed) as (cb, retry, fail_current): - self.assertTrue(fail_current.called) - self.assertEqual( - fail_current.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail_current.call_args[1]['exc'], ChordError, - ) - - @contextmanager - def _chord_context(self, ResultCls, setup=None, **kwargs): - @self.app.task(shared=False) - def callback(*args, **kwargs): - pass - self.app.finalize() - - pts, result.GroupResult = result.GroupResult, ResultCls - callback.apply_async = Mock() - callback_s = callback.s() - callback_s.id = 'callback_id' - fail_current = self.app.backend.fail_from_current_stack = Mock() - try: - with patch_unlock_retry(self.app) as (unlock, retry): - subtask, canvas.maybe_signature = ( - canvas.maybe_signature, passthru, - ) - if setup: - setup(callback) - try: - assert self.app.tasks['celery.chord_unlock'] is unlock - try: - unlock( - 'group_id', callback_s, - result=[ - self.app.AsyncResult(r) for r in ['1', 2, 3] - ], - GroupResult=ResultCls, **kwargs - ) - except Retry: - pass - finally: - canvas.maybe_signature = subtask - yield callback_s, retry, fail_current - finally: - result.GroupResult = pts - - def test_when_not_ready(self): - class NeverReady(TSR): - is_ready = False - - with self._chord_context(NeverReady, interval=10, max_retries=30) \ - as (cb, retry, _): - self.assertFalse(cb.type.apply_async.called) - # did retry - retry.assert_called_with(countdown=10, max_retries=30) - - def test_is_in_registry(self): - self.assertIn('celery.chord_unlock', self.app.tasks) - - -class test_chord(ChordCase): - - def test_eager(self): - from celery import chord - - @self.app.task(shared=False) - def addX(x, y): - return x + y - - @self.app.task(shared=False) - def sumX(n): - return sum(n) - - self.app.conf.CELERY_ALWAYS_EAGER = True - x = chord(addX.s(i, i) for i in range(10)) - body = sumX.s() - result = x(body) - self.assertEqual(result.get(), sum(i + i for i in range(10))) - - def test_apply(self): - self.app.conf.CELERY_ALWAYS_EAGER = False - from celery import chord - - m = Mock() - m.app.conf.CELERY_ALWAYS_EAGER = False - m.AsyncResult = AsyncResult - prev, chord._type = chord._type, m - try: - x = chord(self.add.s(i, i) for i in range(10)) - body = self.add.s(2) - result = x(body) - self.assertTrue(result.id) - # does not modify original subtask - with self.assertRaises(KeyError): - body.options['task_id'] - self.assertTrue(chord._type.called) - finally: - chord._type = prev - - -class test_Chord_task(ChordCase): - - def test_run(self): - self.app.backend = Mock() - self.app.backend.cleanup = Mock() - self.app.backend.cleanup.__name__ = 'cleanup' - Chord = self.app.tasks['celery.chord'] - - body = dict() - Chord(group(self.add.subtask((i, i)) for i in range(5)), body) - Chord([self.add.subtask((j, j)) for j in range(5)], body) - self.assertEqual(self.app.backend.apply_chord.call_count, 2) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py deleted file mode 
100644 index ecad3f8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -from celery.app.task import Context -from celery.tests.case import AppCase - - -# Retrieve the values of all context attributes as a -# dictionary in an implementation-agnostic manner. -def get_context_as_dict(ctx, getter=getattr): - defaults = {} - for attr_name in dir(ctx): - if attr_name.startswith('_'): - continue # Ignore pseudo-private attributes - attr = getter(ctx, attr_name) - if callable(attr): - continue # Ignore methods and other non-trivial types - defaults[attr_name] = attr - return defaults -default_context = get_context_as_dict(Context()) - - -class test_Context(AppCase): - - def test_default_context(self): - # A bit of a tautological test, since it uses the same - # initializer as the default_context constructor. - defaults = dict(default_context, children=[]) - self.assertDictEqual(get_context_as_dict(Context()), defaults) - - def test_updated_context(self): - expected = dict(default_context) - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - expected.update(changes) - ctx.update(changes) - self.assertDictEqual(get_context_as_dict(ctx), expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) - - def test_modified_context(self): - expected = dict(default_context) - ctx = Context() - expected['id'] = 'unique id' - expected['args'] = ['some', 1] - ctx.id = 'unique id' - ctx.args = ['some', 1] - self.assertDictEqual(get_context_as_dict(ctx), expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) - - def test_cleared_context(self): - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - ctx.update(changes) - ctx.clear() - defaults = dict(default_context, children=[]) - self.assertDictEqual(get_context_as_dict(ctx), defaults) - self.assertDictEqual(get_context_as_dict(Context()), defaults) - - def test_context_get(self): - expected = dict(default_context) - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - expected.update(changes) - ctx.update(changes) - ctx_dict = get_context_as_dict(ctx, getter=Context.get) - self.assertDictEqual(ctx_dict, expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py deleted file mode 100644 index 50a9e23..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py +++ /dev/null @@ -1,731 +0,0 @@ -from __future__ import absolute_import - -from contextlib import contextmanager - -from celery import states -from celery.exceptions import IncompleteStream, TimeoutError -from celery.five import range -from celery.result import ( - AsyncResult, - EagerResult, - TaskSetResult, - result_from_tuple, -) -from celery.utils import uuid -from celery.utils.serialization import pickle - -from celery.tests.case import AppCase, Mock, depends_on_current_app, patch - - -def mock_task(name, state, result): - return dict(id=uuid(), name=name, state=state, result=result) - - -def save_result(app, task): - traceback = 'Some traceback' - if task['state'] == states.SUCCESS: - app.backend.mark_as_done(task['id'], task['result']) - elif task['state'] == states.RETRY: - app.backend.mark_as_retry( -
task['id'], task['result'], traceback=traceback, - ) - else: - app.backend.mark_as_failure( - task['id'], task['result'], traceback=traceback, - ) - - -def make_mock_group(app, size=10): - tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)] - [save_result(app, task) for task in tasks] - return [app.AsyncResult(task['id']) for task in tasks] - - -class test_AsyncResult(AppCase): - - def setup(self): - self.task1 = mock_task('task1', states.SUCCESS, 'the') - self.task2 = mock_task('task2', states.SUCCESS, 'quick') - self.task3 = mock_task('task3', states.FAILURE, KeyError('brown')) - self.task4 = mock_task('task3', states.RETRY, KeyError('red')) - - for task in (self.task1, self.task2, self.task3, self.task4): - save_result(self.app, task) - - @self.app.task(shared=False) - def mytask(): - pass - self.mytask = mytask - - def test_compat_properties(self): - x = self.app.AsyncResult('1') - self.assertEqual(x.task_id, x.id) - x.task_id = '2' - self.assertEqual(x.id, '2') - - def test_children(self): - x = self.app.AsyncResult('1') - children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x._cache = {'children': children, 'status': states.SUCCESS} - x.backend = Mock() - self.assertTrue(x.children) - self.assertEqual(len(x.children), 3) - - def test_propagates_for_parent(self): - x = self.app.AsyncResult(uuid()) - x.backend = Mock(name='backend') - x.backend.get_task_meta.return_value = {} - x.backend.wait_for.return_value = { - 'status': states.SUCCESS, 'result': 84, - } - x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) - with self.assertRaises(KeyError): - x.get(propagate=True) - self.assertFalse(x.backend.wait_for.called) - - x.parent = EagerResult(uuid(), 42, states.SUCCESS) - self.assertEqual(x.get(propagate=True), 84) - self.assertTrue(x.backend.wait_for.called) - - def test_get_children(self): - tid = uuid() - x = self.app.AsyncResult(tid) - child = [self.app.AsyncResult(uuid()).as_tuple() - for i in range(10)] - x._cache = {'children': child} - self.assertTrue(x.children) - self.assertEqual(len(x.children), 10) - - x._cache = {'status': states.SUCCESS} - x.backend._cache[tid] = {'result': None} - self.assertIsNone(x.children) - - def test_build_graph_get_leaf_collect(self): - x = self.app.AsyncResult('1') - x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} - c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x.iterdeps = Mock() - x.iterdeps.return_value = ( - (None, x), - (x, c[0]), - (c[0], c[1]), - (c[1], c[2]) - ) - x.backend.READY_STATES = states.READY_STATES - self.assertTrue(x.graph) - - self.assertIs(x.get_leaf(), 2) - - it = x.collect() - self.assertListEqual(list(it), [ - (x, None), - (c[0], 0), - (c[1], 1), - (c[2], 2), - ]) - - def test_iterdeps(self): - x = self.app.AsyncResult('1') - c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} - for child in c: - child.backend = Mock() - child.backend.get_children.return_value = [] - it = x.iterdeps() - self.assertListEqual(list(it), [ - (None, x), - (x, c[0]), - (x, c[1]), - (x, c[2]), - ]) - x._cache = None - x.ready = Mock() - x.ready.return_value = False - with self.assertRaises(IncompleteStream): - list(x.iterdeps()) - list(x.iterdeps(intermediate=True)) - - def test_eq_not_implemented(self): - self.assertFalse(self.app.AsyncResult('1') == object()) - - @depends_on_current_app - def test_reduce(self): - a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name) - 
restored = pickle.loads(pickle.dumps(a1)) - self.assertEqual(restored.id, 'uuid') - self.assertEqual(restored.task_name, self.mytask.name) - - a2 = self.app.AsyncResult('uuid') - self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid') - - def test_successful(self): - ok_res = self.app.AsyncResult(self.task1['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok_res2 = self.app.AsyncResult(self.task4['id']) - - self.assertTrue(ok_res.successful()) - self.assertFalse(nok_res.successful()) - self.assertFalse(nok_res2.successful()) - - pending_res = self.app.AsyncResult(uuid()) - self.assertFalse(pending_res.successful()) - - def test_str(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - self.assertEqual(str(ok_res), self.task1['id']) - self.assertEqual(str(ok2_res), self.task2['id']) - self.assertEqual(str(nok_res), self.task3['id']) - - pending_id = uuid() - pending_res = self.app.AsyncResult(pending_id) - self.assertEqual(str(pending_res), pending_id) - - def test_repr(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % ( - self.task1['id'])) - self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % ( - self.task2['id'])) - self.assertEqual(repr(nok_res), '<AsyncResult: %s>' % ( - self.task3['id'])) - - pending_id = uuid() - pending_res = self.app.AsyncResult(pending_id) - self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % ( - pending_id)) - - def test_hash(self): - self.assertEqual(hash(self.app.AsyncResult('x0w991')), - hash(self.app.AsyncResult('x0w991'))) - self.assertNotEqual(hash(self.app.AsyncResult('x0w991')), - hash(self.app.AsyncResult('x1w991'))) - - def test_get_traceback(self): - ok_res = self.app.AsyncResult(self.task1['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok_res2 = self.app.AsyncResult(self.task4['id']) - self.assertFalse(ok_res.traceback) - self.assertTrue(nok_res.traceback) - self.assertTrue(nok_res2.traceback) - - pending_res = self.app.AsyncResult(uuid()) - self.assertFalse(pending_res.traceback) - - def test_get(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok2_res = self.app.AsyncResult(self.task4['id']) - - self.assertEqual(ok_res.get(), 'the') - self.assertEqual(ok2_res.get(), 'quick') - with self.assertRaises(KeyError): - nok_res.get() - self.assertTrue(nok_res.get(propagate=False)) - self.assertIsInstance(nok2_res.result, KeyError) - self.assertEqual(ok_res.info, 'the') - - def test_get_timeout(self): - res = self.app.AsyncResult(self.task4['id'])  # has RETRY state - with self.assertRaises(TimeoutError): - res.get(timeout=0.001) - - pending_res = self.app.AsyncResult(uuid()) - with patch('celery.result.time') as _time: - with self.assertRaises(TimeoutError): - pending_res.get(timeout=0.001, interval=0.001) - _time.sleep.assert_called_with(0.001) - - def test_get_timeout_longer(self): - res = self.app.AsyncResult(self.task4['id'])  # has RETRY state - with patch('celery.result.time') as _time: - with self.assertRaises(TimeoutError): - res.get(timeout=1, interval=1) - _time.sleep.assert_called_with(1) - - def test_ready(self): - oks = (self.app.AsyncResult(self.task1['id']), - self.app.AsyncResult(self.task2['id']), - self.app.AsyncResult(self.task3['id'])) - self.assertTrue(all(result.ready() for result in oks))
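The AsyncResult behaviour asserted above (ready/successful checks, get(), the repr format) can be observed without a broker by applying a task eagerly. A sketch under that assumption, with illustrative names:

    from celery import Celery

    app = Celery('sketch')  # hypothetical app, for illustration only

    @app.task
    def add(x, y):
        return x + y

    res = add.apply(args=(2, 2))  # runs inline and returns an EagerResult
    assert res.ready() and res.successful()
    assert res.get() == 4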
- self.assertFalse(self.app.AsyncResult(self.task4['id']).ready()) - - self.assertFalse(self.app.AsyncResult(uuid()).ready()) - - -class test_ResultSet(AppCase): - - def test_resultset_repr(self): - self.assertTrue(repr(self.app.ResultSet( - [self.app.AsyncResult(t) for t in ['1', '2', '3']]))) - - def test_eq_other(self): - self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1) - self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1])) - - def test_get(self): - x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) - b = x.results[0].backend = Mock() - b.supports_native_join = False - x.join_native = Mock() - x.join = Mock() - x.get() - self.assertTrue(x.join.called) - b.supports_native_join = True - x.get() - self.assertTrue(x.join_native.called) - - def test_get_empty(self): - x = self.app.ResultSet([]) - self.assertIsNone(x.supports_native_join) - x.join = Mock(name='join') - x.get() - self.assertTrue(x.join.called) - - def test_add(self): - x = self.app.ResultSet([1]) - x.add(2) - self.assertEqual(len(x), 2) - x.add(2) - self.assertEqual(len(x), 2) - - @contextmanager - def dummy_copy(self): - with patch('celery.result.copy') as copy: - - def passt(arg): - return arg - copy.side_effect = passt - - yield - - def test_iterate_respects_subpolling_interval(self): - r1 = self.app.AsyncResult(uuid()) - r2 = self.app.AsyncResult(uuid()) - backend = r1.backend = r2.backend = Mock() - backend.subpolling_interval = 10 - - ready = r1.ready = r2.ready = Mock() - - def se(*args, **kwargs): - ready.side_effect = KeyError() - return False - ready.return_value = False - ready.side_effect = se - - x = self.app.ResultSet([r1, r2]) - with self.dummy_copy(): - with patch('celery.result.time') as _time: - with self.assertPendingDeprecation(): - with self.assertRaises(KeyError): - list(x.iterate()) - _time.sleep.assert_called_with(10) - - backend.subpolling_interval = 0 - with patch('celery.result.time') as _time: - with self.assertPendingDeprecation(): - with self.assertRaises(KeyError): - ready.return_value = False - ready.side_effect = se - list(x.iterate()) - self.assertFalse(_time.sleep.called) - - def test_times_out(self): - r1 = self.app.AsyncResult(uuid) - r1.ready = Mock() - r1.ready.return_value = False - x = self.app.ResultSet([r1]) - with self.dummy_copy(): - with patch('celery.result.time'): - with self.assertPendingDeprecation(): - with self.assertRaises(TimeoutError): - list(x.iterate(timeout=1)) - - def test_add_discard(self): - x = self.app.ResultSet([]) - x.add(self.app.AsyncResult('1')) - self.assertIn(self.app.AsyncResult('1'), x.results) - x.discard(self.app.AsyncResult('1')) - x.discard(self.app.AsyncResult('1')) - x.discard('1') - self.assertNotIn(self.app.AsyncResult('1'), x.results) - - x.update([self.app.AsyncResult('2')]) - - def test_clear(self): - x = self.app.ResultSet([]) - r = x.results - x.clear() - self.assertIs(x.results, r) - - -class MockAsyncResultFailure(AsyncResult): - - @property - def result(self): - return KeyError('baz') - - @property - def state(self): - return states.FAILURE - - def get(self, propagate=True, **kwargs): - if propagate: - raise self.result - return self.result - - -class MockAsyncResultSuccess(AsyncResult): - forgotten = False - - def forget(self): - self.forgotten = True - - @property - def result(self): - return 42 - - @property - def state(self): - return states.SUCCESS - - def get(self, **kwargs): - return self.result - - -class SimpleBackend(object): - ids = [] - - def __init__(self, ids=[]): - self.ids = ids - - def 
get_many(self, *args, **kwargs): - return ((id, {'result': i, 'status': states.SUCCESS}) - for i, id in enumerate(self.ids)) - - -class test_TaskSetResult(AppCase): - - def setup(self): - self.size = 10 - self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size)) - - def test_total(self): - self.assertEqual(self.ts.total, self.size) - - def test_compat_properties(self): - self.assertEqual(self.ts.taskset_id, self.ts.id) - self.ts.taskset_id = 'foo' - self.assertEqual(self.ts.taskset_id, 'foo') - - def test_compat_subtasks_kwarg(self): - x = TaskSetResult(uuid(), subtasks=[1, 2, 3]) - self.assertEqual(x.results, [1, 2, 3]) - - def test_itersubtasks(self): - it = self.ts.itersubtasks() - - for i, t in enumerate(it): - self.assertEqual(t.get(), i) - - -class test_GroupResult(AppCase): - - def setup(self): - self.size = 10 - self.ts = self.app.GroupResult( - uuid(), make_mock_group(self.app, self.size), - ) - - @depends_on_current_app - def test_is_pickleable(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertEqual(pickle.loads(pickle.dumps(ts)), ts) - ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2) - - def test_len(self): - self.assertEqual(len(self.ts), self.size) - - def test_eq_other(self): - self.assertFalse(self.ts == 1) - - @depends_on_current_app - def test_reduce(self): - self.assertTrue(pickle.loads(pickle.dumps(self.ts))) - - def test_iterate_raises(self): - ar = MockAsyncResultFailure(uuid(), app=self.app) - ts = self.app.GroupResult(uuid(), [ar]) - with self.assertPendingDeprecation(): - it = ts.iterate() - with self.assertRaises(KeyError): - next(it) - - def test_forget(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - ts.forget() - for sub in subs: - self.assertTrue(sub.forgotten) - - def test_getitem(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - self.assertIs(ts[0], subs[0]) - - def test_save_restore(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - ts.save() - with self.assertRaises(AttributeError): - ts.save(backend=object()) - self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks, - ts.subtasks) - ts.delete() - self.assertIsNone(self.app.GroupResult.restore(ts.id)) - with self.assertRaises(AttributeError): - self.app.GroupResult.restore(ts.id, backend=object()) - - def test_join_native(self): - backend = SimpleBackend() - subtasks = [self.app.AsyncResult(uuid(), backend=backend) - for i in range(10)] - ts = self.app.GroupResult(uuid(), subtasks) - ts.app.backend = backend - backend.ids = [subtask.id for subtask in subtasks] - res = ts.join_native() - self.assertEqual(res, list(range(10))) - - def test_join_native_raises(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - ts.iter_native = Mock() - ts.iter_native.return_value = iter([ - (uuid(), {'status': states.FAILURE, 'result': KeyError()}) - ]) - with self.assertRaises(KeyError): - ts.join_native(propagate=True) - - def test_failed_join_report(self): - res = Mock() - ts = self.app.GroupResult(uuid(), [res]) - res.state = states.FAILURE - res.backend.is_cached.return_value = True - self.assertIs(next(ts._failed_join_report()), res) - 
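The join semantics tested in this file, results collected in task order with failures re-raised when propagate is enabled, look like this in use. Another eager-mode sketch with illustrative names:

    from celery import Celery, group

    app = Celery('sketch')

    @app.task
    def square(x):
        return x * x

    res = group(square.s(i) for i in range(4)).apply()  # eager GroupResult
    assert res.successful()
    assert res.join() == [0, 1, 4, 9]  # ordered; join(propagate=True) raises on failure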
res.backend.is_cached.return_value = False - with self.assertRaises(StopIteration): - next(ts._failed_join_report()) - - def test_repr(self): - self.assertTrue(repr( - self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - )) - - def test_children_is_results(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertIs(ts.children, ts.results) - - def test_iter_native(self): - backend = SimpleBackend() - subtasks = [self.app.AsyncResult(uuid(), backend=backend) - for i in range(10)] - ts = self.app.GroupResult(uuid(), subtasks) - ts.app.backend = backend - backend.ids = [subtask.id for subtask in subtasks] - self.assertEqual(len(list(ts.iter_native())), 10) - - def test_iterate_yields(self): - ar = MockAsyncResultSuccess(uuid(), app=self.app) - ar2 = MockAsyncResultSuccess(uuid(), app=self.app) - ts = self.app.GroupResult(uuid(), [ar, ar2]) - with self.assertPendingDeprecation(): - it = ts.iterate() - self.assertEqual(next(it), 42) - self.assertEqual(next(it), 42) - - def test_iterate_eager(self): - ar1 = EagerResult(uuid(), 42, states.SUCCESS) - ar2 = EagerResult(uuid(), 42, states.SUCCESS) - ts = self.app.GroupResult(uuid(), [ar1, ar2]) - with self.assertPendingDeprecation(): - it = ts.iterate() - self.assertEqual(next(it), 42) - self.assertEqual(next(it), 42) - - def test_join_timeout(self): - ar = MockAsyncResultSuccess(uuid(), app=self.app) - ar2 = MockAsyncResultSuccess(uuid(), app=self.app) - ar3 = self.app.AsyncResult(uuid()) - ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) - with self.assertRaises(TimeoutError): - ts.join(timeout=0.0000001) - - ar4 = self.app.AsyncResult(uuid()) - ar4.get = Mock() - ts2 = self.app.GroupResult(uuid(), [ar4]) - self.assertTrue(ts2.join(timeout=0.1)) - - def test_iter_native_when_empty_group(self): - ts = self.app.GroupResult(uuid(), []) - self.assertListEqual(list(ts.iter_native()), []) - - def test_iterate_simple(self): - with self.assertPendingDeprecation(): - it = self.ts.iterate() - results = sorted(list(it)) - self.assertListEqual(results, list(range(self.size))) - - def test___iter__(self): - self.assertListEqual(list(iter(self.ts)), self.ts.results) - - def test_join(self): - joined = self.ts.join() - self.assertListEqual(joined, list(range(self.size))) - - def test_successful(self): - self.assertTrue(self.ts.successful()) - - def test_failed(self): - self.assertFalse(self.ts.failed()) - - def test_waiting(self): - self.assertFalse(self.ts.waiting()) - - def test_ready(self): - self.assertTrue(self.ts.ready()) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), len(self.ts)) - - -class test_pending_AsyncResult(AppCase): - - def setup(self): - self.task = self.app.AsyncResult(uuid()) - - def test_result(self): - self.assertIsNone(self.task.result) - - -class test_failed_AsyncResult(test_GroupResult): - - def setup(self): - self.size = 11 - subtasks = make_mock_group(self.app, 10) - failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) - save_result(self.app, failed) - failed_res = self.app.AsyncResult(failed['id']) - self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res]) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), len(self.ts) - 1) - - def test_iterate_simple(self): - with self.assertPendingDeprecation(): - it = self.ts.iterate() - - def consume(): - return list(it) - - with self.assertRaises(KeyError): - consume() - - def test_join(self): - with self.assertRaises(KeyError): - self.ts.join() - - def test_successful(self): 
- self.assertFalse(self.ts.successful()) - - def test_failed(self): - self.assertTrue(self.ts.failed()) - - -class test_pending_Group(AppCase): - - def setup(self): - self.ts = self.app.GroupResult( - uuid(), [self.app.AsyncResult(uuid()), - self.app.AsyncResult(uuid())]) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), 0) - - def test_ready(self): - self.assertFalse(self.ts.ready()) - - def test_waiting(self): - self.assertTrue(self.ts.waiting()) - - def x_join(self): - with self.assertRaises(TimeoutError): - self.ts.join(timeout=0.001) - - def x_join_longer(self): - with self.assertRaises(TimeoutError): - self.ts.join(timeout=1) - - -class test_EagerResult(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def raising(x, y): - raise KeyError(x, y) - self.raising = raising - - def test_wait_raises(self): - res = self.raising.apply(args=[3, 3]) - with self.assertRaises(KeyError): - res.wait() - self.assertTrue(res.wait(propagate=False)) - - def test_wait(self): - res = EagerResult('x', 'x', states.RETRY) - res.wait() - self.assertEqual(res.state, states.RETRY) - self.assertEqual(res.status, states.RETRY) - - def test_forget(self): - res = EagerResult('x', 'x', states.RETRY) - res.forget() - - def test_revoke(self): - res = self.raising.apply(args=[3, 3]) - self.assertFalse(res.revoke()) - - -class test_tuples(AppCase): - - def test_AsyncResult(self): - x = self.app.AsyncResult(uuid()) - self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) - self.assertEqual(x, result_from_tuple(x, self.app)) - - def test_with_parent(self): - x = self.app.AsyncResult(uuid()) - x.parent = self.app.AsyncResult(uuid()) - y = result_from_tuple(x.as_tuple(), self.app) - self.assertEqual(y, x) - self.assertEqual(y.parent, x.parent) - self.assertIsInstance(y.parent, AsyncResult) - - def test_compat(self): - uid = uuid() - x = result_from_tuple([uid, []], app=self.app) - self.assertEqual(x.id, uid) - - def test_GroupResult(self): - x = self.app.GroupResult( - uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], - ) - self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) - self.assertEqual(x, result_from_tuple(x, self.app)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py deleted file mode 100644 index b30a4ee..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import absolute_import - -from celery.states import state -from celery import states -from celery.tests.case import Case - - -class test_state_precedence(Case): - - def test_gt(self): - self.assertGreater(state(states.SUCCESS), - state(states.PENDING)) - self.assertGreater(state(states.FAILURE), - state(states.RECEIVED)) - self.assertGreater(state(states.REVOKED), - state(states.STARTED)) - self.assertGreater(state(states.SUCCESS), - state('CRASHED')) - self.assertGreater(state(states.FAILURE), - state('CRASHED')) - self.assertFalse(state(states.REVOKED) > state('CRASHED')) - - def test_lt(self): - self.assertLess(state(states.PENDING), state(states.SUCCESS)) - self.assertLess(state(states.RECEIVED), state(states.FAILURE)) - self.assertLess(state(states.STARTED), state(states.REVOKED)) - self.assertLess(state('CRASHED'), state(states.SUCCESS)) - self.assertLess(state('CRASHED'), state(states.FAILURE)) - self.assertTrue(state(states.REVOKED) < state('CRASHED')) - 
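celery.states.state() wraps a state name in a precedence-aware type, which is what the comparisons in test_state_precedence rely on; for instance:

    from celery import states
    from celery.states import state

    assert state(states.SUCCESS) > state(states.PENDING)  # SUCCESS outranks PENDING
    assert state(states.STARTED) < state(states.REVOKED)
    # Unknown names such as 'CRASHED' rank above REVOKED but below SUCCESS
    # and FAILURE, exactly as the deleted assertions encode:
    assert state(states.REVOKED) < state('CRASHED') < state(states.SUCCESS)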
self.assertTrue(state(states.REVOKED) <= state('CRASHED')) - self.assertTrue(state('CRASHED') >= state(states.REVOKED)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py deleted file mode 100644 index 4feae0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py +++ /dev/null @@ -1,464 +0,0 @@ -from __future__ import absolute_import - -from datetime import datetime, timedelta - -from kombu import Queue - -from celery import Task - -from celery.exceptions import Retry -from celery.five import items, range, string_t -from celery.result import EagerResult -from celery.utils import uuid -from celery.utils.timeutils import parse_iso8601 - -from celery.tests.case import AppCase, depends_on_current_app, patch - - -def return_True(*args, **kwargs): - # Task run functions can't be closures/lambdas, as they're pickled. - return True - - -def raise_exception(self, **kwargs): - raise Exception('%s error' % self.__class__) - - -class MockApplyTask(Task): - abstract = True - applied = 0 - - def run(self, x, y): - return x * y - - def apply_async(self, *args, **kwargs): - self.applied += 1 - - -class TasksCase(AppCase): - - def setup(self): - self.mytask = self.app.task(shared=False)(return_True) - - @self.app.task(bind=True, count=0, shared=False) - def increment_counter(self, increment_by=1): - self.count += increment_by or 1 - return self.count - self.increment_counter = increment_counter - - @self.app.task(shared=False) - def raising(): - raise KeyError('foo') - self.raising = raising - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): - self.iterations += 1 - rmax = self.max_retries if max_retries is None else max_retries - - assert repr(self.request) - retries = self.request.retries - if care and retries >= rmax: - return arg1 - else: - raise self.retry(countdown=0, max_retries=rmax) - self.retry_task = retry_task - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task_noargs(self, **kwargs): - self.iterations += 1 - - if self.request.retries >= 3: - return 42 - else: - raise self.retry(countdown=0) - self.retry_task_noargs = retry_task_noargs - - @self.app.task(bind=True, max_retries=3, iterations=0, - base=MockApplyTask, shared=False) - def retry_task_mockapply(self, arg1, arg2, kwarg=1): - self.iterations += 1 - - retries = self.request.retries - if retries >= 3: - return arg1 - raise self.retry(countdown=0) - self.retry_task_mockapply = retry_task_mockapply - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): - self.iterations += 1 - - retries = self.request.retries - if retries >= 3: - return arg1 + kwarg - else: - try: - raise MyCustomException('Elaine Marie Benes') - except MyCustomException as exc: - kwargs.update(kwarg=kwarg) - raise self.retry(countdown=0, exc=exc) - self.retry_task_customexc = retry_task_customexc - - -class MyCustomException(Exception): - """Random custom exception.""" - - -class test_task_retries(TasksCase): - - def test_retry(self): - self.retry_task.max_retries = 3 - self.retry_task.iterations = 0 - self.retry_task.apply([0xFF, 0xFFFF]) - self.assertEqual(self.retry_task.iterations, 4) - - self.retry_task.max_retries = 3 - self.retry_task.iterations = 0 - self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) - 
self.assertEqual(self.retry_task.iterations, 11) - - def test_retry_no_args(self): - self.retry_task_noargs.max_retries = 3 - self.retry_task_noargs.iterations = 0 - self.retry_task_noargs.apply(propagate=True).get() - self.assertEqual(self.retry_task_noargs.iterations, 4) - - def test_retry_kwargs_can_be_empty(self): - self.retry_task_mockapply.push_request() - try: - with self.assertRaises(Retry): - import sys - try: - sys.exc_clear() - except AttributeError: - pass - self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) - finally: - self.retry_task_mockapply.pop_request() - - def test_retry_not_eager(self): - self.retry_task_mockapply.push_request() - try: - self.retry_task_mockapply.request.called_directly = False - exc = Exception('baz') - try: - self.retry_task_mockapply.retry( - args=[4, 4], kwargs={'task_retries': 0}, - exc=exc, throw=False, - ) - self.assertTrue(self.retry_task_mockapply.applied) - finally: - self.retry_task_mockapply.applied = 0 - - try: - with self.assertRaises(Retry): - self.retry_task_mockapply.retry( - args=[4, 4], kwargs={'task_retries': 0}, - exc=exc, throw=True) - self.assertTrue(self.retry_task_mockapply.applied) - finally: - self.retry_task_mockapply.applied = 0 - finally: - self.retry_task_mockapply.pop_request() - - def test_retry_with_kwargs(self): - self.retry_task_customexc.max_retries = 3 - self.retry_task_customexc.iterations = 0 - self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) - self.assertEqual(self.retry_task_customexc.iterations, 4) - - def test_retry_with_custom_exception(self): - self.retry_task_customexc.max_retries = 2 - self.retry_task_customexc.iterations = 0 - result = self.retry_task_customexc.apply( - [0xFF, 0xFFFF], {'kwarg': 0xF}, - ) - with self.assertRaises(MyCustomException): - result.get() - self.assertEqual(self.retry_task_customexc.iterations, 3) - - def test_max_retries_exceeded(self): - self.retry_task.max_retries = 2 - self.retry_task.iterations = 0 - result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(self.retry_task.MaxRetriesExceededError): - result.get() - self.assertEqual(self.retry_task.iterations, 3) - - self.retry_task.max_retries = 1 - self.retry_task.iterations = 0 - result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(self.retry_task.MaxRetriesExceededError): - result.get() - self.assertEqual(self.retry_task.iterations, 2) - - -class test_canvas_utils(TasksCase): - - def test_si(self): - self.assertTrue(self.retry_task.si()) - self.assertTrue(self.retry_task.si().immutable) - - def test_chunks(self): - self.assertTrue(self.retry_task.chunks(range(100), 10)) - - def test_map(self): - self.assertTrue(self.retry_task.map(range(100))) - - def test_starmap(self): - self.assertTrue(self.retry_task.starmap(range(100))) - - def test_on_success(self): - self.retry_task.on_success(1, 1, (), {}) - - -class test_tasks(TasksCase): - - def now(self): - return self.app.now() - - @depends_on_current_app - def test_unpickle_task(self): - import pickle - - @self.app.task(shared=True) - def xxx(): - pass - self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx.app.tasks[xxx.name]) - - def test_AsyncResult(self): - task_id = uuid() - result = self.retry_task.AsyncResult(task_id) - self.assertEqual(result.backend, self.retry_task.backend) - self.assertEqual(result.id, task_id) - - def assertNextTaskDataEqual(self, consumer, presult, task_name, - test_eta=False, test_expires=False, **kwargs): - next_task = 
consumer.queues[0].get(accept=['pickle']) - task_data = next_task.decode() - self.assertEqual(task_data['id'], presult.id) - self.assertEqual(task_data['task'], task_name) - task_kwargs = task_data.get('kwargs', {}) - if test_eta: - self.assertIsInstance(task_data.get('eta'), string_t) - to_datetime = parse_iso8601(task_data.get('eta')) - self.assertIsInstance(to_datetime, datetime) - if test_expires: - self.assertIsInstance(task_data.get('expires'), string_t) - to_datetime = parse_iso8601(task_data.get('expires')) - self.assertIsInstance(to_datetime, datetime) - for arg_name, arg_value in items(kwargs): - self.assertEqual(task_kwargs.get(arg_name), arg_value) - - def test_incomplete_task_cls(self): - - class IncompleteTask(Task): - app = self.app - name = 'c.unittest.t.itask' - - with self.assertRaises(NotImplementedError): - IncompleteTask().run() - - def test_task_kwargs_must_be_dictionary(self): - with self.assertRaises(ValueError): - self.increment_counter.apply_async([], 'str') - - def test_task_args_must_be_list(self): - with self.assertRaises(ValueError): - self.increment_counter.apply_async('str', {}) - - def test_regular_task(self): - self.assertIsInstance(self.mytask, Task) - self.assertTrue(self.mytask.run()) - self.assertTrue( - callable(self.mytask), 'Task class is callable()', - ) - self.assertTrue(self.mytask(), 'Task class runs run() when called') - - with self.app.connection_or_acquire() as conn: - consumer = self.app.amqp.TaskConsumer(conn) - with self.assertRaises(NotImplementedError): - consumer.receive('foo', 'foo') - consumer.purge() - self.assertIsNone(consumer.queues[0].get()) - self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) - - # Without arguments. - presult = self.mytask.delay() - self.assertNextTaskDataEqual(consumer, presult, self.mytask.name) - - # With arguments. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, name='George Costanza', - ) - - # send_task - sresult = self.app.send_task(self.mytask.name, - kwargs=dict(name='Elaine M. Benes')) - self.assertNextTaskDataEqual( - consumer, sresult, self.mytask.name, name='Elaine M. Benes', - ) - - # With eta. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), - eta=self.now() + timedelta(days=1), - expires=self.now() + timedelta(days=2), - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, - name='George Costanza', test_eta=True, test_expires=True, - ) - - # With countdown. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), countdown=10, expires=12, - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, - name='George Costanza', test_eta=True, test_expires=True, - ) - - # Discarding all tasks. 
- consumer.purge() - self.mytask.apply_async() - self.assertEqual(consumer.purge(), 1) - self.assertIsNone(consumer.queues[0].get()) - - self.assertFalse(presult.successful()) - self.mytask.backend.mark_as_done(presult.id, result=None) - self.assertTrue(presult.successful()) - - def test_repr_v2_compat(self): - self.mytask.__v2_compat__ = True - self.assertIn('v2 compatible', repr(self.mytask)) - - def test_apply_with_self(self): - - @self.app.task(__self__=42, shared=False) - def tawself(self): - return self - - self.assertEqual(tawself.apply().get(), 42) - - self.assertEqual(tawself(), 42) - - def test_context_get(self): - self.mytask.push_request() - try: - request = self.mytask.request - request.foo = 32 - self.assertEqual(request.get('foo'), 32) - self.assertEqual(request.get('bar', 36), 36) - request.clear() - finally: - self.mytask.pop_request() - - def test_task_class_repr(self): - self.assertIn('class Task of', repr(self.mytask.app.Task)) - self.mytask.app.Task._app = None - self.assertIn('unbound', repr(self.mytask.app.Task, )) - - def test_bind_no_magic_kwargs(self): - self.mytask.accept_magic_kwargs = None - self.mytask.bind(self.mytask.app) - - def test_annotate(self): - with patch('celery.app.task.resolve_all_annotations') as anno: - anno.return_value = [{'FOO': 'BAR'}] - - @self.app.task(shared=False) - def task(): - pass - task.annotate() - self.assertEqual(task.FOO, 'BAR') - - def test_after_return(self): - self.mytask.push_request() - try: - self.mytask.request.chord = self.mytask.s() - self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) - self.mytask.request.clear() - finally: - self.mytask.pop_request() - - def test_send_task_sent_event(self): - with self.app.connection() as conn: - self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True - self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event) - - def test_update_state(self): - - @self.app.task(shared=False) - def yyy(): - pass - - yyy.push_request() - try: - tid = uuid() - yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'}) - self.assertEqual(yyy.AsyncResult(tid).status, 'FROBULATING') - self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) - - yyy.request.id = tid - yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'}) - self.assertEqual(yyy.AsyncResult(tid).status, 'FROBUZATING') - self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) - finally: - yyy.pop_request() - - def test_repr(self): - - @self.app.task(shared=False) - def task_test_repr(): - pass - - self.assertIn('task_test_repr', repr(task_test_repr)) - - def test_has___name__(self): - - @self.app.task(shared=False) - def yyy2(): - pass - - self.assertTrue(yyy2.__name__) - - -class test_apply_task(TasksCase): - - def test_apply_throw(self): - with self.assertRaises(KeyError): - self.raising.apply(throw=True) - - def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self): - self.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True - with self.assertRaises(KeyError): - self.raising.apply() - - def test_apply(self): - self.increment_counter.count = 0 - - e = self.increment_counter.apply() - self.assertIsInstance(e, EagerResult) - self.assertEqual(e.get(), 1) - - e = self.increment_counter.apply(args=[1]) - self.assertEqual(e.get(), 2) - - e = self.increment_counter.apply(kwargs={'increment_by': 4}) - self.assertEqual(e.get(), 6) - - self.assertTrue(e.successful()) - self.assertTrue(e.ready()) - self.assertTrue(repr(e).startswith('<EagerResult:')) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py deleted file mode 100644 - self.assertEqual(Proxy(lambda: 10) >> 2, Proxy(lambda: 2)) - self.assertEqual(Proxy(lambda: 10) ^ 7, Proxy(lambda:
13)) - self.assertEqual(Proxy(lambda: 10) | 40, Proxy(lambda: 42)) - self.assertEqual(~Proxy(lambda: 10), Proxy(lambda: -11)) - self.assertEqual(-Proxy(lambda: 10), Proxy(lambda: -10)) - self.assertEqual(+Proxy(lambda: -10), Proxy(lambda: -10)) - self.assertTrue(Proxy(lambda: 10) < Proxy(lambda: 20)) - self.assertTrue(Proxy(lambda: 20) > Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) >= Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) <= Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) == Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 20) != Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 100).__divmod__(30)) - self.assertTrue(Proxy(lambda: 100).__truediv__(30)) - self.assertTrue(abs(Proxy(lambda: -100))) - - x = Proxy(lambda: 10) - x -= 1 - self.assertEqual(x, 9) - x = Proxy(lambda: 9) - x += 1 - self.assertEqual(x, 10) - x = Proxy(lambda: 10) - x *= 2 - self.assertEqual(x, 20) - x = Proxy(lambda: 20) - x /= 2 - self.assertEqual(x, 10) - x = Proxy(lambda: 10) - x %= 2 - self.assertEqual(x, 0) - x = Proxy(lambda: 10) - x <<= 3 - self.assertEqual(x, 80) - x = Proxy(lambda: 80) - x >>= 4 - self.assertEqual(x, 5) - x = Proxy(lambda: 5) - x ^= 1 - self.assertEqual(x, 4) - x = Proxy(lambda: 4) - x **= 4 - self.assertEqual(x, 256) - x = Proxy(lambda: 256) - x //= 2 - self.assertEqual(x, 128) - x = Proxy(lambda: 128) - x |= 2 - self.assertEqual(x, 130) - x = Proxy(lambda: 130) - x &= 10 - self.assertEqual(x, 2) - - x = Proxy(lambda: 10) - self.assertEqual(type(x.__float__()), float) - self.assertEqual(type(x.__int__()), int) - if not PY3: - self.assertEqual(type(x.__long__()), long_t) - self.assertTrue(hex(x)) - self.assertTrue(oct(x)) - - def test_hash(self): - - class X(object): - - def __hash__(self): - return 1234 - - self.assertEqual(hash(Proxy(lambda: X())), 1234) - - def test_call(self): - - class X(object): - - def __call__(self): - return 1234 - - self.assertEqual(Proxy(lambda: X())(), 1234) - - def test_context(self): - - class X(object): - entered = exited = False - - def __enter__(self): - self.entered = True - return 1234 - - def __exit__(self, *exc_info): - self.exited = True - - v = X() - x = Proxy(lambda: v) - with x as val: - self.assertEqual(val, 1234) - self.assertTrue(x.entered) - self.assertTrue(x.exited) - - def test_reduce(self): - - class X(object): - - def __reduce__(self): - return 123 - - x = Proxy(lambda: X()) - self.assertEqual(x.__reduce__(), 123) - - -class test_PromiseProxy(Case): - - def test_only_evaluated_once(self): - - class X(object): - attr = 123 - evals = 0 - - def __init__(self): - self.__class__.evals += 1 - - p = PromiseProxy(X) - self.assertEqual(p.attr, 123) - self.assertEqual(p.attr, 123) - self.assertEqual(X.evals, 1) - - def test_callbacks(self): - source = Mock(name='source') - p = PromiseProxy(source) - cbA = Mock(name='cbA') - cbB = Mock(name='cbB') - cbC = Mock(name='cbC') - p.__then__(cbA, p) - p.__then__(cbB, p) - self.assertFalse(p.__evaluated__()) - self.assertTrue(object.__getattribute__(p, '__pending__')) - - self.assertTrue(repr(p)) - self.assertTrue(p.__evaluated__()) - with self.assertRaises(AttributeError): - object.__getattribute__(p, '__pending__') - cbA.assert_called_with(p) - cbB.assert_called_with(p) - - self.assertTrue(p.__evaluated__()) - p.__then__(cbC, p) - cbC.assert_called_with(p) - - with self.assertRaises(AttributeError): - object.__getattribute__(p, '__pending__') - - def test_maybe_evaluate(self): - x = PromiseProxy(lambda: 30) - self.assertFalse(x.__evaluated__()) - 
self.assertEqual(maybe_evaluate(x), 30) - self.assertEqual(maybe_evaluate(x), 30) - - self.assertEqual(maybe_evaluate(30), 30) - self.assertTrue(x.__evaluated__()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py deleted file mode 100644 index e4fc965..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.mail import Message, Mailer, SSLError - -from celery.tests.case import Case, Mock, patch - - -msg = Message(to='george@vandelay.com', sender='elaine@pendant.com', - subject="What's up with Jerry?", body='???!') - - -class test_Message(Case): - - def test_repr(self): - self.assertTrue(repr(msg)) - - def test_str(self): - self.assertTrue(str(msg)) - - -class test_Mailer(Case): - - def test_send_wrapper(self): - mailer = Mailer() - mailer._send = Mock() - mailer.send(msg) - mailer._send.assert_called_with(msg) - - @patch('smtplib.SMTP_SSL', create=True) - def test_send_ssl_tls(self, SMTP_SSL): - mailer = Mailer(use_ssl=True, use_tls=True) - client = SMTP_SSL.return_value = Mock() - mailer._send(msg) - self.assertTrue(client.starttls.called) - self.assertEqual(client.ehlo.call_count, 2) - client.quit.assert_called_with() - client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) - mailer = Mailer(use_ssl=True, use_tls=True, user='foo', - password='bar') - mailer._send(msg) - client.login.assert_called_with('foo', 'bar') - - @patch('smtplib.SMTP') - def test_send(self, SMTP): - client = SMTP.return_value = Mock() - mailer = Mailer(use_ssl=False, use_tls=False) - mailer._send(msg) - - client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) - - client.quit.side_effect = SSLError() - mailer._send(msg) - client.close.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py deleted file mode 100644 index 6b65bb3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.serialization import pickle -from celery.tests.case import Case - - -class RegularException(Exception): - pass - - -class ArgOverrideException(Exception): - - def __init__(self, message, status_code=10): - self.status_code = status_code - Exception.__init__(self, message, status_code) - - -class test_Pickle(Case): - - def test_pickle_regular_exception(self): - exc = None - try: - raise RegularException('RegularException raised') - except RegularException as exc_: - exc = exc_ - - pickled = pickle.dumps({'exception': exc}) - unpickled = pickle.loads(pickled) - exception = unpickled.get('exception') - self.assertTrue(exception) - self.assertIsInstance(exception, RegularException) - self.assertTupleEqual(exception.args, ('RegularException raised', )) - - def test_pickle_arg_override_exception(self): - - exc = None - try: - raise ArgOverrideException( - 'ArgOverrideException raised', status_code=100, - ) - except ArgOverrideException as exc_: - exc = exc_ - - pickled = pickle.dumps({'exception': exc}) - unpickled = pickle.loads(pickled) - exception = unpickled.get('exception') - self.assertTrue(exception) - self.assertIsInstance(exception, ArgOverrideException) - self.assertTupleEqual(exception.args, ( - 'ArgOverrideException raised', 100)) - 
self.assertEqual(exception.status_code, 100) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py deleted file mode 100644 index 4f2c584..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py +++ /dev/null @@ -1,713 +0,0 @@ -from __future__ import absolute_import - -import errno -import os -import sys -import signal -import tempfile - -from celery import _find_option_with_arg -from celery import platforms -from celery.five import open_fqdn -from celery.platforms import ( - get_fdmax, - ignore_errno, - set_process_title, - signals, - maybe_drop_privileges, - setuid, - setgid, - initgroups, - parse_uid, - parse_gid, - detached, - DaemonContext, - create_pidlock, - Pidfile, - LockFailed, - setgroups, - _setgroups_hack, - close_open_fds, - fd_by_path, -) - -try: - import resource -except ImportError: # pragma: no cover - resource = None # noqa - -from celery.tests.case import ( - Case, WhateverIO, Mock, SkipTest, - call, override_stdouts, mock_open, patch, -) - - -class test_find_option_with_arg(Case): - - def test_long_opt(self): - self.assertEqual( - _find_option_with_arg(['--foo=bar'], long_opts=['--foo']), - 'bar' - ) - - def test_short_opt(self): - self.assertEqual( - _find_option_with_arg(['-f', 'bar'], short_opts=['-f']), - 'bar' - ) - - -class test_fd_by_path(Case): - - def test_finds(self): - test_file = tempfile.NamedTemporaryFile() - keep = fd_by_path([test_file.name]) - self.assertEqual(keep, [test_file.file.fileno()]) - test_file.close() - - -class test_close_open_fds(Case): - - def test_closes(self): - with patch('os.close') as _close: - with patch('os.closerange', create=True) as closerange: - with patch('celery.platforms.get_fdmax') as fdmax: - fdmax.return_value = 3 - close_open_fds() - if not closerange.called: - _close.assert_has_calls([call(2), call(1), call(0)]) - _close.side_effect = OSError() - _close.side_effect.errno = errno.EBADF - close_open_fds() - - -class test_ignore_errno(Case): - - def test_raises_EBADF(self): - with ignore_errno('EBADF'): - exc = OSError() - exc.errno = errno.EBADF - raise exc - - def test_otherwise(self): - with self.assertRaises(OSError): - with ignore_errno('EBADF'): - exc = OSError() - exc.errno = errno.ENOENT - raise exc - - -class test_set_process_title(Case): - - def when_no_setps(self): - prev = platforms._setproctitle = platforms._setproctitle, None - try: - set_process_title('foo') - finally: - platforms._setproctitle = prev - - -class test_Signals(Case): - - @patch('signal.getsignal') - def test_getitem(self, getsignal): - signals['SIGINT'] - getsignal.assert_called_with(signal.SIGINT) - - def test_supported(self): - self.assertTrue(signals.supported('INT')) - self.assertFalse(signals.supported('SIGIMAGINARY')) - - def test_reset_alarm(self): - if sys.platform == 'win32': - raise SkipTest('signal.alarm not available on Windows') - with patch('signal.alarm') as _alarm: - signals.reset_alarm() - _alarm.assert_called_with(0) - - def test_arm_alarm(self): - if hasattr(signal, 'setitimer'): - with patch('signal.setitimer', create=True) as seti: - signals.arm_alarm(30) - self.assertTrue(seti.called) - - def test_signum(self): - self.assertEqual(signals.signum(13), 13) - self.assertEqual(signals.signum('INT'), signal.SIGINT) - self.assertEqual(signals.signum('SIGINT'), signal.SIGINT) - with self.assertRaises(TypeError): - signals.signum('int') - signals.signum(object()) - - 
@patch('signal.signal') - def test_ignore(self, set): - signals.ignore('SIGINT') - set.assert_called_with(signals.signum('INT'), signals.ignored) - signals.ignore('SIGTERM') - set.assert_called_with(signals.signum('TERM'), signals.ignored) - - @patch('signal.signal') - def test_setitem(self, set): - def handle(*a): - return a - signals['INT'] = handle - set.assert_called_with(signal.SIGINT, handle) - - @patch('signal.signal') - def test_setitem_raises(self, set): - set.side_effect = ValueError() - signals['INT'] = lambda *a: a - - -if not platforms.IS_WINDOWS: - - class test_get_fdmax(Case): - - @patch('resource.getrlimit') - def test_when_infinity(self, getrlimit): - with patch('os.sysconf') as sysconfig: - sysconfig.side_effect = KeyError() - getrlimit.return_value = [None, resource.RLIM_INFINITY] - default = object() - self.assertIs(get_fdmax(default), default) - - @patch('resource.getrlimit') - def test_when_actual(self, getrlimit): - with patch('os.sysconf') as sysconfig: - sysconfig.side_effect = KeyError() - getrlimit.return_value = [None, 13] - self.assertEqual(get_fdmax(None), 13) - - class test_maybe_drop_privileges(Case): - - @patch('celery.platforms.parse_uid') - @patch('pwd.getpwuid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.setuid') - @patch('celery.platforms.initgroups') - def test_with_uid(self, initgroups, setuid, setgid, - getpwuid, parse_uid): - - class pw_struct(object): - pw_gid = 50001 - - def raise_on_second_call(*args, **kwargs): - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EPERM - setuid.side_effect = raise_on_second_call - getpwuid.return_value = pw_struct() - parse_uid.return_value = 5001 - maybe_drop_privileges(uid='user') - parse_uid.assert_called_with('user') - getpwuid.assert_called_with(5001) - setgid.assert_called_with(50001) - initgroups.assert_called_with(5001, 50001) - setuid.assert_has_calls([call(5001), call(0)]) - - @patch('celery.platforms.parse_uid') - @patch('celery.platforms.parse_gid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.setuid') - @patch('celery.platforms.initgroups') - def test_with_guid(self, initgroups, setuid, setgid, - parse_gid, parse_uid): - - def raise_on_second_call(*args, **kwargs): - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EPERM - setuid.side_effect = raise_on_second_call - parse_uid.return_value = 5001 - parse_gid.return_value = 50001 - maybe_drop_privileges(uid='user', gid='group') - parse_uid.assert_called_with('user') - parse_gid.assert_called_with('group') - setgid.assert_called_with(50001) - initgroups.assert_called_with(5001, 50001) - setuid.assert_has_calls([call(5001), call(0)]) - - setuid.side_effect = None - with self.assertRaises(RuntimeError): - maybe_drop_privileges(uid='user', gid='group') - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EINVAL - with self.assertRaises(OSError): - maybe_drop_privileges(uid='user', gid='group') - - @patch('celery.platforms.setuid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.parse_gid') - def test_only_gid(self, parse_gid, setgid, setuid): - parse_gid.return_value = 50001 - maybe_drop_privileges(gid='group') - parse_gid.assert_called_with('group') - setgid.assert_called_with(50001) - self.assertFalse(setuid.called) - - class test_setget_uid_gid(Case): - - @patch('celery.platforms.parse_uid') - @patch('os.setuid') - def test_setuid(self, _setuid, parse_uid): - parse_uid.return_value = 5001 - setuid('user') - parse_uid.assert_called_with('user') - 
_setuid.assert_called_with(5001) - - @patch('celery.platforms.parse_gid') - @patch('os.setgid') - def test_setgid(self, _setgid, parse_gid): - parse_gid.return_value = 50001 - setgid('group') - parse_gid.assert_called_with('group') - _setgid.assert_called_with(50001) - - def test_parse_uid_when_int(self): - self.assertEqual(parse_uid(5001), 5001) - - @patch('pwd.getpwnam') - def test_parse_uid_when_existing_name(self, getpwnam): - - class pwent(object): - pw_uid = 5001 - - getpwnam.return_value = pwent() - self.assertEqual(parse_uid('user'), 5001) - - @patch('pwd.getpwnam') - def test_parse_uid_when_nonexisting_name(self, getpwnam): - getpwnam.side_effect = KeyError('user') - - with self.assertRaises(KeyError): - parse_uid('user') - - def test_parse_gid_when_int(self): - self.assertEqual(parse_gid(50001), 50001) - - @patch('grp.getgrnam') - def test_parse_gid_when_existing_name(self, getgrnam): - - class grent(object): - gr_gid = 50001 - - getgrnam.return_value = grent() - self.assertEqual(parse_gid('group'), 50001) - - @patch('grp.getgrnam') - def test_parse_gid_when_nonexisting_name(self, getgrnam): - getgrnam.side_effect = KeyError('group') - - with self.assertRaises(KeyError): - parse_gid('group') - - class test_initgroups(Case): - - @patch('pwd.getpwuid') - @patch('os.initgroups', create=True) - def test_with_initgroups(self, initgroups_, getpwuid): - getpwuid.return_value = ['user'] - initgroups(5001, 50001) - initgroups_.assert_called_with('user', 50001) - - @patch('celery.platforms.setgroups') - @patch('grp.getgrall') - @patch('pwd.getpwuid') - def test_without_initgroups(self, getpwuid, getgrall, setgroups): - prev = getattr(os, 'initgroups', None) - try: - delattr(os, 'initgroups') - except AttributeError: - pass - try: - getpwuid.return_value = ['user'] - - class grent(object): - gr_mem = ['user'] - - def __init__(self, gid): - self.gr_gid = gid - - getgrall.return_value = [grent(1), grent(2), grent(3)] - initgroups(5001, 50001) - setgroups.assert_called_with([1, 2, 3]) - finally: - if prev: - os.initgroups = prev - - class test_detached(Case): - - def test_without_resource(self): - prev, platforms.resource = platforms.resource, None - try: - with self.assertRaises(RuntimeError): - detached() - finally: - platforms.resource = prev - - @patch('celery.platforms._create_pidlock') - @patch('celery.platforms.signals') - @patch('celery.platforms.maybe_drop_privileges') - @patch('os.geteuid') - @patch(open_fqdn) - def test_default(self, open, geteuid, maybe_drop, - signals, pidlock): - geteuid.return_value = 0 - context = detached(uid='user', gid='group') - self.assertIsInstance(context, DaemonContext) - signals.reset.assert_called_with('SIGCLD') - maybe_drop.assert_called_with(uid='user', gid='group') - open.return_value = Mock() - - geteuid.return_value = 5001 - context = detached(uid='user', gid='group', logfile='/foo/bar') - self.assertIsInstance(context, DaemonContext) - self.assertTrue(context.after_chdir) - context.after_chdir() - open.assert_called_with('/foo/bar', 'a') - open.return_value.close.assert_called_with() - - context = detached(pidfile='/foo/bar/pid') - self.assertIsInstance(context, DaemonContext) - self.assertTrue(context.after_chdir) - context.after_chdir() - pidlock.assert_called_with('/foo/bar/pid') - - class test_DaemonContext(Case): - - @patch('os.fork') - @patch('os.setsid') - @patch('os._exit') - @patch('os.chdir') - @patch('os.umask') - @patch('os.close') - @patch('os.closerange') - @patch('os.open') - @patch('os.dup2') - def test_open(self, dup2, open, 
close, closer, umask, chdir, - _exit, setsid, fork): - x = DaemonContext(workdir='/opt/workdir', umask=0o22) - x.stdfds = [0, 1, 2] - - fork.return_value = 0 - with x: - self.assertTrue(x._is_open) - with x: - pass - self.assertEqual(fork.call_count, 2) - setsid.assert_called_with() - self.assertFalse(_exit.called) - - chdir.assert_called_with(x.workdir) - umask.assert_called_with(0o22) - self.assertTrue(dup2.called) - - fork.reset_mock() - fork.return_value = 1 - x = DaemonContext(workdir='/opt/workdir') - x.stdfds = [0, 1, 2] - with x: - pass - self.assertEqual(fork.call_count, 1) - _exit.assert_called_with(0) - - x = DaemonContext(workdir='/opt/workdir', fake=True) - x.stdfds = [0, 1, 2] - x._detach = Mock() - with x: - pass - self.assertFalse(x._detach.called) - - x.after_chdir = Mock() - with x: - pass - x.after_chdir.assert_called_with() - - class test_Pidfile(Case): - - @patch('celery.platforms.Pidfile') - def test_create_pidlock(self, Pidfile): - p = Pidfile.return_value = Mock() - p.is_locked.return_value = True - p.remove_if_stale.return_value = False - with override_stdouts() as (_, err): - with self.assertRaises(SystemExit): - create_pidlock('/var/pid') - self.assertIn('already exists', err.getvalue()) - - p.remove_if_stale.return_value = True - ret = create_pidlock('/var/pid') - self.assertIs(ret, p) - - def test_context(self): - p = Pidfile('/var/pid') - p.write_pid = Mock() - p.remove = Mock() - - with p as _p: - self.assertIs(_p, p) - p.write_pid.assert_called_with() - p.remove.assert_called_with() - - def test_acquire_raises_LockFailed(self): - p = Pidfile('/var/pid') - p.write_pid = Mock() - p.write_pid.side_effect = OSError() - - with self.assertRaises(LockFailed): - with p: - pass - - @patch('os.path.exists') - def test_is_locked(self, exists): - p = Pidfile('/var/pid') - exists.return_value = True - self.assertTrue(p.is_locked()) - exists.return_value = False - self.assertFalse(p.is_locked()) - - def test_read_pid(self): - with mock_open() as s: - s.write('1816\n') - s.seek(0) - p = Pidfile('/var/pid') - self.assertEqual(p.read_pid(), 1816) - - def test_read_pid_partially_written(self): - with mock_open() as s: - s.write('1816') - s.seek(0) - p = Pidfile('/var/pid') - with self.assertRaises(ValueError): - p.read_pid() - - def test_read_pid_raises_ENOENT(self): - exc = IOError() - exc.errno = errno.ENOENT - with mock_open(side_effect=exc): - p = Pidfile('/var/pid') - self.assertIsNone(p.read_pid()) - - def test_read_pid_raises_IOError(self): - exc = IOError() - exc.errno = errno.EAGAIN - with mock_open(side_effect=exc): - p = Pidfile('/var/pid') - with self.assertRaises(IOError): - p.read_pid() - - def test_read_pid_bogus_pidfile(self): - with mock_open() as s: - s.write('eighteensixteen\n') - s.seek(0) - p = Pidfile('/var/pid') - with self.assertRaises(ValueError): - p.read_pid() - - @patch('os.unlink') - def test_remove(self, unlink): - unlink.return_value = True - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_ENOENT(self, unlink): - exc = OSError() - exc.errno = errno.ENOENT - unlink.side_effect = exc - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_EACCES(self, unlink): - exc = OSError() - exc.errno = errno.EACCES - unlink.side_effect = exc - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_OSError(self, unlink): - exc = OSError() - exc.errno = errno.EAGAIN - 
unlink.side_effect = exc - p = Pidfile('/var/pid') - with self.assertRaises(OSError): - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.kill') - def test_remove_if_stale_process_alive(self, kill): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = 1816 - kill.return_value = 0 - self.assertFalse(p.remove_if_stale()) - kill.assert_called_with(1816, 0) - p.read_pid.assert_called_with() - - kill.side_effect = OSError() - kill.side_effect.errno = errno.ENOENT - self.assertFalse(p.remove_if_stale()) - - @patch('os.kill') - def test_remove_if_stale_process_dead(self, kill): - with override_stdouts(): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = 1816 - p.remove = Mock() - exc = OSError() - exc.errno = errno.ESRCH - kill.side_effect = exc - self.assertTrue(p.remove_if_stale()) - kill.assert_called_with(1816, 0) - p.remove.assert_called_with() - - def test_remove_if_stale_broken_pid(self): - with override_stdouts(): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.side_effect = ValueError() - p.remove = Mock() - - self.assertTrue(p.remove_if_stale()) - p.remove.assert_called_with() - - def test_remove_if_stale_no_pidfile(self): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = None - p.remove = Mock() - - self.assertTrue(p.remove_if_stale()) - p.remove.assert_called_with() - - @patch('os.fsync') - @patch('os.getpid') - @patch('os.open') - @patch('os.fdopen') - @patch(open_fqdn) - def test_write_pid(self, open_, fdopen, osopen, getpid, fsync): - getpid.return_value = 1816 - osopen.return_value = 13 - w = fdopen.return_value = WhateverIO() - w.close = Mock() - r = open_.return_value = WhateverIO() - r.write('1816\n') - r.seek(0) - - p = Pidfile('/var/pid') - p.write_pid() - w.seek(0) - self.assertEqual(w.readline(), '1816\n') - self.assertTrue(w.close.called) - getpid.assert_called_with() - osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS, - platforms.PIDFILE_MODE) - fdopen.assert_called_with(13, 'w') - fsync.assert_called_with(13) - open_.assert_called_with(p.path) - - @patch('os.fsync') - @patch('os.getpid') - @patch('os.open') - @patch('os.fdopen') - @patch(open_fqdn) - def test_write_reread_fails(self, open_, fdopen, - osopen, getpid, fsync): - getpid.return_value = 1816 - osopen.return_value = 13 - w = fdopen.return_value = WhateverIO() - w.close = Mock() - r = open_.return_value = WhateverIO() - r.write('11816\n') - r.seek(0) - - p = Pidfile('/var/pid') - with self.assertRaises(LockFailed): - p.write_pid() - - class test_setgroups(Case): - - @patch('os.setgroups', create=True) - def test_setgroups_hack_ValueError(self, setgroups): - - def on_setgroups(groups): - if len(groups) <= 200: - setgroups.return_value = True - return - raise ValueError() - setgroups.side_effect = on_setgroups - _setgroups_hack(list(range(400))) - - setgroups.side_effect = ValueError() - with self.assertRaises(ValueError): - _setgroups_hack(list(range(400))) - - @patch('os.setgroups', create=True) - def test_setgroups_hack_OSError(self, setgroups): - exc = OSError() - exc.errno = errno.EINVAL - - def on_setgroups(groups): - if len(groups) <= 200: - setgroups.return_value = True - return - raise exc - setgroups.side_effect = on_setgroups - - _setgroups_hack(list(range(400))) - - setgroups.side_effect = exc - with self.assertRaises(OSError): - _setgroups_hack(list(range(400))) - - exc2 = OSError() - exc.errno = errno.ESRCH - setgroups.side_effect = exc2 - with self.assertRaises(OSError): - 
_setgroups_hack(list(range(400))) - - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups(self, hack, sysconf): - sysconf.return_value = 100 - setgroups(list(range(400))) - hack.assert_called_with(list(range(100))) - - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_sysconf_raises(self, hack, sysconf): - sysconf.side_effect = ValueError() - setgroups(list(range(400))) - hack.assert_called_with(list(range(400))) - - @patch('os.getgroups') - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups): - sysconf.side_effect = ValueError() - esrch = OSError() - esrch.errno = errno.ESRCH - hack.side_effect = esrch - with self.assertRaises(OSError): - setgroups(list(range(400))) - - @patch('os.getgroups') - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups): - sysconf.side_effect = ValueError() - eperm = OSError() - eperm.errno = errno.EPERM - hack.side_effect = eperm - getgroups.return_value = list(range(400)) - setgroups(list(range(400))) - getgroups.assert_called_with() - - getgroups.return_value = [1000] - with self.assertRaises(OSError): - setgroups(list(range(400))) - getgroups.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py deleted file mode 100644 index 9c18d71..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import absolute_import - -from celery.five import range -from celery.utils.dispatch.saferef import safe_ref -from celery.tests.case import Case - - -class Class1(object): - - def x(self): - pass - - -def fun(obj): - pass - - -class Class2(object): - - def __call__(self, obj): - pass - - -class SaferefTests(Case): - - def setUp(self): - ts = [] - ss = [] - for x in range(5000): - t = Class1() - ts.append(t) - s = safe_ref(t.x, self._closure) - ss.append(s) - ts.append(fun) - ss.append(safe_ref(fun, self._closure)) - for x in range(30): - t = Class2() - ts.append(t) - s = safe_ref(t, self._closure) - ss.append(s) - self.ts = ts - self.ss = ss - self.closureCount = 0 - - def tearDown(self): - del self.ts - del self.ss - - def test_in(self): - """test_in - - Test the "in" operator for safe references (cmp) - - """ - for t in self.ts[:50]: - self.assertTrue(safe_ref(t.x) in self.ss) - - def test_valid(self): - """test_value - - Test that the references are valid (return instance methods) - - """ - for s in self.ss: - self.assertTrue(s()) - - def test_shortcircuit(self): - """test_shortcircuit - - Test that creation short-circuits to reuse existing references - - """ - sd = {} - for s in self.ss: - sd[s] = 1 - for t in self.ts: - if hasattr(t, 'x'): - self.assertIn(safe_ref(t.x), sd) - else: - self.assertIn(safe_ref(t), sd) - - def test_representation(self): - """test_representation - - Test that the reference object's representation works - - XXX Doesn't currently check the results, just that no error - is raised - """ - repr(self.ss[-1]) - - def _closure(self, ref): - """Dumb utility mechanism to increment deletion counter""" - self.closureCount += 1 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py deleted file mode 100644 
index 53dfdad..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import - -import sys - -from celery.utils.serialization import ( - UnpickleableExceptionWrapper, - get_pickleable_etype, -) - -from celery.tests.case import Case, mask_modules - - -class test_AAPickle(Case): - - def test_no_cpickle(self): - prev = sys.modules.pop('celery.utils.serialization', None) - try: - with mask_modules('cPickle'): - from celery.utils.serialization import pickle - import pickle as orig_pickle - self.assertIs(pickle.dumps, orig_pickle.dumps) - finally: - sys.modules['celery.utils.serialization'] = prev - - -class test_UnpickleExceptionWrapper(Case): - - def test_init(self): - x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x]) - self.assertTrue(x.exc_args) - self.assertEqual(len(x.exc_args), 2) - - -class test_get_pickleable_etype(Case): - - def test_get_pickleable_etype(self): - - class Unpickleable(Exception): - def __reduce__(self): - raise ValueError('foo') - - self.assertIs(get_pickleable_etype(Unpickleable), Exception) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py deleted file mode 100644 index 4cd32c7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import absolute_import - -import os - -from celery.utils.sysinfo import load_average, df - -from celery.tests.case import Case, SkipTest, patch - - -class test_load_average(Case): - - def test_avg(self): - if not hasattr(os, 'getloadavg'): - raise SkipTest('getloadavg not available') - with patch('os.getloadavg') as getloadavg: - getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875 - l = load_average() - self.assertTrue(l) - self.assertEqual(l, (0.55, 0.64, 0.7)) - - -class test_df(Case): - - def test_df(self): - try: - from posix import statvfs_result # noqa - except ImportError: - raise SkipTest('statvfs not available') - x = df('/') - self.assertTrue(x.total_blocks) - self.assertTrue(x.available) - self.assertTrue(x.capacity) - self.assertTrue(x.stat) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py deleted file mode 100644 index 1bd7e43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, unicode_literals - -import sys - -from celery.utils import term -from celery.utils.term import colored, fg -from celery.five import text_t - -from celery.tests.case import Case, SkipTest - - -class test_colored(Case): - - def setUp(self): - if sys.platform == 'win32': - raise SkipTest('Colors not supported on Windows') - - self._prev_encoding = sys.getdefaultencoding - - def getdefaultencoding(): - return 'utf-8' - - sys.getdefaultencoding = getdefaultencoding - - def tearDown(self): - sys.getdefaultencoding = self._prev_encoding - - def test_colors(self): - colors = ( - ('black', term.BLACK), - ('red', term.RED), - ('green', term.GREEN), - ('yellow', term.YELLOW), - ('blue', term.BLUE), - ('magenta', term.MAGENTA), - ('cyan', term.CYAN), - ('white', term.WHITE), - ) - - for name, key in colors: - self.assertIn(fg(30 + key), str(colored().names[name]('foo'))) - - self.assertTrue(str(colored().bold('f'))) 
- self.assertTrue(str(colored().underline('f'))) - self.assertTrue(str(colored().blink('f'))) - self.assertTrue(str(colored().reverse('f'))) - self.assertTrue(str(colored().bright('f'))) - self.assertTrue(str(colored().ired('f'))) - self.assertTrue(str(colored().igreen('f'))) - self.assertTrue(str(colored().iyellow('f'))) - self.assertTrue(str(colored().iblue('f'))) - self.assertTrue(str(colored().imagenta('f'))) - self.assertTrue(str(colored().icyan('f'))) - self.assertTrue(str(colored().iwhite('f'))) - self.assertTrue(str(colored().reset('f'))) - - self.assertTrue(text_t(colored().green('∂bar'))) - - self.assertTrue( - colored().red('éefoo') + colored().green('∂bar')) - - self.assertEqual( - colored().red('foo').no_color(), 'foo') - - self.assertTrue( - repr(colored().blue('åfoo'))) - - self.assertIn("''", repr(colored())) - - c = colored() - s = c.red('foo', c.blue('bar'), c.green('baz')) - self.assertTrue(s.no_color()) - - c._fold_no_color(s, 'øfoo') - c._fold_no_color('fooå', s) - - c = colored().red('åfoo') - self.assertEqual( - c._add(c, 'baræ'), - '\x1b[1;31m\xe5foo\x1b[0mbar\xe6', - ) - - c2 = colored().blue('ƒƒz') - c3 = c._add(c, c2) - self.assertEqual( - c3, - '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m', - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py deleted file mode 100644 index 383bdb6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.text import ( - indent, - ensure_2lines, - abbr, - truncate, - abbrtask, - pretty, -) -from celery.tests.case import AppCase, Case - -RANDTEXT = """\ -The quick brown -fox jumps -over the -lazy dog\ -""" - -RANDTEXT_RES = """\ - The quick brown - fox jumps - over the - lazy dog\ -""" - -QUEUES = { - 'queue1': { - 'exchange': 'exchange1', - 'exchange_type': 'type1', - 'routing_key': 'bind1', - }, - 'queue2': { - 'exchange': 'exchange2', - 'exchange_type': 'type2', - 'routing_key': 'bind2', - }, -} - - -QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1' -QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) key=bind2' - - -class test_Info(AppCase): - - def test_textindent(self): - self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES) - - def test_format_queues(self): - self.app.amqp.queues = self.app.amqp.Queues(QUEUES) - self.assertEqual(sorted(self.app.amqp.queues.format().split('\n')), - sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) - - def test_ensure_2lines(self): - self.assertEqual( - len(ensure_2lines('foo\nbar\nbaz\n').splitlines()), 3, - ) - self.assertEqual( - len(ensure_2lines('foo\nbar').splitlines()), 2, - ) - - -class test_utils(Case): - - def test_truncate_text(self): - self.assertEqual(truncate('ABCDEFGHI', 3), 'ABC...') - self.assertEqual(truncate('ABCDEFGHI', 10), 'ABCDEFGHI') - - def test_abbr(self): - self.assertEqual(abbr(None, 3), '???') - self.assertEqual(abbr('ABCDEFGHI', 6), 'ABC...') - self.assertEqual(abbr('ABCDEFGHI', 20), 'ABCDEFGHI') - self.assertEqual(abbr('ABCDEFGHI', 6, None), 'ABCDEF') - - def test_abbrtask(self): - self.assertEqual(abbrtask(None, 3), '???') - self.assertEqual( - abbrtask('feeds.tasks.refresh', 10), - '[.]refresh', - ) - self.assertEqual( - abbrtask('feeds.tasks.refresh', 30), - 'feeds.tasks.refresh', - ) - - def test_pretty(self): - self.assertTrue(pretty(('a', 'b', 'c'))) diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py deleted file mode 100644 index b7f9c43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.threads import ( - _LocalStack, - _FastLocalStack, - LocalManager, - Local, - bgThread, -) - -from celery.tests.case import Case, override_stdouts, patch - - -class test_bgThread(Case): - - def test_crash(self): - - class T(bgThread): - - def body(self): - raise KeyError() - - with patch('os._exit') as _exit: - with override_stdouts(): - _exit.side_effect = ValueError() - t = T() - with self.assertRaises(ValueError): - t.run() - _exit.assert_called_with(1) - - def test_interface(self): - x = bgThread() - with self.assertRaises(NotImplementedError): - x.body() - - -class test_Local(Case): - - def test_iter(self): - x = Local() - x.foo = 'bar' - ident = x.__ident_func__() - self.assertIn((ident, {'foo': 'bar'}), list(iter(x))) - - delattr(x, 'foo') - self.assertNotIn((ident, {'foo': 'bar'}), list(iter(x))) - with self.assertRaises(AttributeError): - delattr(x, 'foo') - - self.assertIsNotNone(x(lambda: 'foo')) - - -class test_LocalStack(Case): - - def test_stack(self): - x = _LocalStack() - self.assertIsNone(x.pop()) - x.__release_local__() - ident = x.__ident_func__ - x.__ident_func__ = ident - - with self.assertRaises(RuntimeError): - x()[0] - - x.push(['foo']) - self.assertEqual(x()[0], 'foo') - x.pop() - with self.assertRaises(RuntimeError): - x()[0] - - -class test_FastLocalStack(Case): - - def test_stack(self): - x = _FastLocalStack() - x.push(['foo']) - x.push(['bar']) - self.assertEqual(x.top, ['bar']) - self.assertEqual(len(x), 2) - x.pop() - self.assertEqual(x.top, ['foo']) - x.pop() - self.assertIsNone(x.top) - - -class test_LocalManager(Case): - - def test_init(self): - x = LocalManager() - self.assertListEqual(x.locals, []) - self.assertTrue(x.ident_func) - - def ident(): - return 1 - - loc = Local() - x = LocalManager([loc], ident_func=ident) - self.assertListEqual(x.locals, [loc]) - x = LocalManager(loc, ident_func=ident) - self.assertListEqual(x.locals, [loc]) - self.assertIs(x.ident_func, ident) - self.assertIs(x.locals[0].__ident_func__, ident) - self.assertEqual(x.get_ident(), 1) - - with patch('celery.utils.threads.release_local') as release: - x.cleanup() - release.assert_called_with(loc) - - self.assertTrue(repr(x)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py deleted file mode 100644 index cb18c21..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import absolute_import - -import sys -import time - -import celery.utils.timer2 as timer2 - -from celery.tests.case import Case, Mock, patch -from kombu.tests.case import redirect_stdouts - - -class test_Entry(Case): - - def test_call(self): - scratch = [None] - - def timed(x, y, moo='foo'): - scratch[0] = (x, y, moo) - - tref = timer2.Entry(timed, (4, 4), {'moo': 'baz'}) - tref() - - self.assertTupleEqual(scratch[0], (4, 4, 'baz')) - - def test_cancel(self): - tref = timer2.Entry(lambda x: x, (1, ), {}) - tref.cancel() - self.assertTrue(tref.cancelled) - - def test_repr(self): - tref = timer2.Entry(lambda x: x(1, ), {}) - self.assertTrue(repr(tref)) - - -class 
test_Schedule(Case): - - def test_supports_Timer_interface(self): - x = timer2.Schedule() - x.stop() - - tref = Mock() - x.cancel(tref) - tref.cancel.assert_called_with() - - self.assertIs(x.schedule, x) - - def test_handle_error(self): - from datetime import datetime - scratch = [None] - - def on_error(exc_info): - scratch[0] = exc_info - - s = timer2.Schedule(on_error=on_error) - - with patch('kombu.async.timer.to_timestamp') as tot: - tot.side_effect = OverflowError() - s.enter_at(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - s.enter_at(timer2.Entry(lambda: None, (), {}), eta=None) - s.on_error = None - with self.assertRaises(OverflowError): - s.enter_at(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - exc = scratch[0] - self.assertIsInstance(exc, OverflowError) - - -class test_Timer(Case): - - def test_enter_after(self): - t = timer2.Timer() - try: - done = [False] - - def set_done(): - done[0] = True - - t.call_after(0.3, set_done) - mss = 0 - while not done[0]: - if mss >= 2.0: - raise Exception('test timed out') - time.sleep(0.1) - mss += 0.1 - finally: - t.stop() - - def test_exit_after(self): - t = timer2.Timer() - t.call_after = Mock() - t.exit_after(0.3, priority=10) - t.call_after.assert_called_with(0.3, sys.exit, 10) - - def test_ensure_started_not_started(self): - t = timer2.Timer() - t.running = True - t.start = Mock() - t.ensure_started() - self.assertFalse(t.start.called) - - def test_call_repeatedly(self): - t = timer2.Timer() - try: - t.schedule.enter_after = Mock() - - myfun = Mock() - myfun.__name__ = 'myfun' - t.call_repeatedly(0.03, myfun) - - self.assertEqual(t.schedule.enter_after.call_count, 1) - args1, _ = t.schedule.enter_after.call_args_list[0] - sec1, tref1, _ = args1 - self.assertEqual(sec1, 0.03) - tref1() - - self.assertEqual(t.schedule.enter_after.call_count, 2) - args2, _ = t.schedule.enter_after.call_args_list[1] - sec2, tref2, _ = args2 - self.assertEqual(sec2, 0.03) - tref2.cancelled = True - tref2() - - self.assertEqual(t.schedule.enter_after.call_count, 2) - finally: - t.stop() - - @patch('kombu.async.timer.logger') - def test_apply_entry_error_handled(self, logger): - t = timer2.Timer() - t.schedule.on_error = None - - fun = Mock() - fun.side_effect = ValueError() - - t.schedule.apply_entry(fun) - self.assertTrue(logger.error.called) - - @redirect_stdouts - def test_apply_entry_error_not_handled(self, stdout, stderr): - t = timer2.Timer() - t.schedule.on_error = Mock() - - fun = Mock() - fun.side_effect = ValueError() - t.schedule.apply_entry(fun) - fun.assert_called_with() - self.assertFalse(stderr.getvalue()) - - @patch('os._exit') - def test_thread_crash(self, _exit): - t = timer2.Timer() - t._next_entry = Mock() - t._next_entry.side_effect = OSError(131) - t.run() - _exit.assert_called_with(1) - - def test_gc_race_lost(self): - t = timer2.Timer() - t._is_stopped.set = Mock() - t._is_stopped.set.side_effect = TypeError() - - t._is_shutdown.set() - t.run() - t._is_stopped.set.assert_called_with() - - def test_to_timestamp(self): - self.assertIs(timer2.to_timestamp(3.13), 3.13) - - def test_test_enter(self): - t = timer2.Timer() - t._do_enter = Mock() - e = Mock() - t.enter(e, 13, 0) - t._do_enter.assert_called_with('enter_at', e, 13, priority=0) - - def test_test_enter_after(self): - t = timer2.Timer() - t._do_enter = Mock() - t.enter_after() - t._do_enter.assert_called_with('enter_after') - - def test_cancel(self): - t = timer2.Timer() - tref = Mock() - t.cancel(tref) - tref.cancel.assert_called_with() diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py deleted file mode 100644 index 2258d06..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py +++ /dev/null @@ -1,267 +0,0 @@ -from __future__ import absolute_import - -import pytz - -from datetime import datetime, timedelta, tzinfo -from pytz import AmbiguousTimeError - -from celery.utils.timeutils import ( - delta_resolution, - humanize_seconds, - maybe_iso8601, - maybe_timedelta, - timedelta_seconds, - timezone, - rate, - remaining, - make_aware, - maybe_make_aware, - localize, - LocalTimezone, - ffwd, - utcoffset, -) -from celery.utils.iso8601 import parse_iso8601 -from celery.tests.case import Case, Mock, patch - - -class test_LocalTimezone(Case): - - def test_daylight(self): - with patch('celery.utils.timeutils._time') as time: - time.timezone = 3600 - time.daylight = False - x = LocalTimezone() - self.assertEqual(x.STDOFFSET, timedelta(seconds=-3600)) - self.assertEqual(x.DSTOFFSET, x.STDOFFSET) - time.daylight = True - time.altzone = 3600 - y = LocalTimezone() - self.assertEqual(y.STDOFFSET, timedelta(seconds=-3600)) - self.assertEqual(y.DSTOFFSET, timedelta(seconds=-3600)) - - self.assertTrue(repr(y)) - - y._isdst = Mock() - y._isdst.return_value = True - self.assertTrue(y.utcoffset(datetime.now())) - self.assertFalse(y.dst(datetime.now())) - y._isdst.return_value = False - self.assertTrue(y.utcoffset(datetime.now())) - self.assertFalse(y.dst(datetime.now())) - - self.assertTrue(y.tzname(datetime.now())) - - -class test_iso8601(Case): - - def test_parse_with_timezone(self): - d = datetime.utcnow().replace(tzinfo=pytz.utc) - self.assertEqual(parse_iso8601(d.isoformat()), d) - # 2013-06-07T20:12:51.775877+00:00 - iso = d.isoformat() - iso1 = iso.replace('+00:00', '-01:00') - d1 = parse_iso8601(iso1) - self.assertEqual(d1.tzinfo._minutes, -60) - iso2 = iso.replace('+00:00', '+01:00') - d2 = parse_iso8601(iso2) - self.assertEqual(d2.tzinfo._minutes, +60) - iso3 = iso.replace('+00:00', 'Z') - d3 = parse_iso8601(iso3) - self.assertEqual(d3.tzinfo, pytz.UTC) - - -class test_timeutils(Case): - - def test_delta_resolution(self): - D = delta_resolution - dt = datetime(2010, 3, 30, 11, 50, 58, 41065) - deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)), - (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), - (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), - (timedelta(seconds=2), dt)) - for delta, shoulda in deltamap: - self.assertEqual(D(dt, delta), shoulda) - - def test_timedelta_seconds(self): - deltamap = ((timedelta(seconds=1), 1), - (timedelta(seconds=27), 27), - (timedelta(minutes=3), 3 * 60), - (timedelta(hours=4), 4 * 60 * 60), - (timedelta(days=3), 3 * 86400)) - for delta, seconds in deltamap: - self.assertEqual(timedelta_seconds(delta), seconds) - - def test_timedelta_seconds_returns_0_on_negative_time(self): - delta = timedelta(days=-2) - self.assertEqual(timedelta_seconds(delta), 0) - - def test_humanize_seconds(self): - t = ((4 * 60 * 60 * 24, '4.00 days'), - (1 * 60 * 60 * 24, '1.00 day'), - (4 * 60 * 60, '4.00 hours'), - (1 * 60 * 60, '1.00 hour'), - (4 * 60, '4.00 minutes'), - (1 * 60, '1.00 minute'), - (4, '4.00 seconds'), - (1, '1.00 second'), - (4.3567631221, '4.36 seconds'), - (0, 'now')) - - for seconds, human in t: - self.assertEqual(humanize_seconds(seconds), human) - - self.assertEqual(humanize_seconds(4, prefix='about '), - 'about 4.00 seconds') - - def 
test_maybe_iso8601_datetime(self): - now = datetime.now() - self.assertIs(maybe_iso8601(now), now) - - def test_maybe_timedelta(self): - D = maybe_timedelta - - for i in (30, 30.6): - self.assertEqual(D(i), timedelta(seconds=i)) - - self.assertEqual(D(timedelta(days=2)), timedelta(days=2)) - - def test_remaining_relative(self): - remaining(datetime.utcnow(), timedelta(hours=1), relative=True) - - -class test_timezone(Case): - - def test_get_timezone_with_pytz(self): - self.assertTrue(timezone.get_timezone('UTC')) - - def test_tz_or_local(self): - self.assertEqual(timezone.tz_or_local(), timezone.local) - self.assertTrue(timezone.tz_or_local(timezone.utc)) - - def test_to_local(self): - self.assertTrue( - timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)), - ) - self.assertTrue( - timezone.to_local(datetime.utcnow()) - ) - - def test_to_local_fallback(self): - self.assertTrue( - timezone.to_local_fallback( - make_aware(datetime.utcnow(), timezone.utc)), - ) - self.assertTrue( - timezone.to_local_fallback(datetime.utcnow()) - ) - - -class test_make_aware(Case): - - def test_tz_without_localize(self): - tz = tzinfo() - self.assertFalse(hasattr(tz, 'localize')) - wtz = make_aware(datetime.utcnow(), tz) - self.assertEqual(wtz.tzinfo, tz) - - def test_when_has_localize(self): - - class tzz(tzinfo): - raises = False - - def localize(self, dt, is_dst=None): - self.localized = True - if self.raises and is_dst is None: - self.raised = True - raise AmbiguousTimeError() - return 1 # needed by min() in Python 3 (None not hashable) - - tz = tzz() - make_aware(datetime.utcnow(), tz) - self.assertTrue(tz.localized) - - tz2 = tzz() - tz2.raises = True - make_aware(datetime.utcnow(), tz2) - self.assertTrue(tz2.localized) - self.assertTrue(tz2.raised) - - def test_maybe_make_aware(self): - aware = datetime.utcnow().replace(tzinfo=timezone.utc) - self.assertTrue(maybe_make_aware(aware), timezone.utc) - naive = datetime.utcnow() - self.assertTrue(maybe_make_aware(naive)) - - -class test_localize(Case): - - def test_tz_without_normalize(self): - tz = tzinfo() - self.assertFalse(hasattr(tz, 'normalize')) - self.assertTrue(localize(make_aware(datetime.utcnow(), tz), tz)) - - def test_when_has_normalize(self): - - class tzz(tzinfo): - raises = None - - def normalize(self, dt, **kwargs): - self.normalized = True - if self.raises and kwargs and kwargs.get('is_dst') is None: - self.raised = True - raise self.raises - return 1 # needed by min() in Python 3 (None not hashable) - - tz = tzz() - localize(make_aware(datetime.utcnow(), tz), tz) - self.assertTrue(tz.normalized) - - tz2 = tzz() - tz2.raises = AmbiguousTimeError() - localize(make_aware(datetime.utcnow(), tz2), tz2) - self.assertTrue(tz2.normalized) - self.assertTrue(tz2.raised) - - tz3 = tzz() - tz3.raises = TypeError() - localize(make_aware(datetime.utcnow(), tz3), tz3) - self.assertTrue(tz3.normalized) - self.assertTrue(tz3.raised) - - -class test_rate_limit_string(Case): - - def test_conversion(self): - self.assertEqual(rate(999), 999) - self.assertEqual(rate(7.5), 7.5) - self.assertEqual(rate('2.5/s'), 2.5) - self.assertEqual(rate('1456/s'), 1456) - self.assertEqual(rate('100/m'), - 100 / 60.0) - self.assertEqual(rate('10/h'), - 10 / 60.0 / 60.0) - - for zero in (0, None, '0', '0/m', '0/h', '0/s', '0.0/s'): - self.assertEqual(rate(zero), 0) - - -class test_ffwd(Case): - - def test_repr(self): - x = ffwd(year=2012) - self.assertTrue(repr(x)) - - def test_radd_with_unknown_gives_NotImplemented(self): - x = ffwd(year=2012) - 
self.assertEqual(x.__radd__(object()), NotImplemented) - - -class test_utcoffset(Case): - - def test_utcoffset(self): - with patch('celery.utils.timeutils._time') as _time: - _time.daylight = True - self.assertIsNotNone(utcoffset()) - _time.daylight = False - self.assertIsNotNone(utcoffset()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py deleted file mode 100644 index 2837ad6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import absolute_import - -import pytz - -from datetime import datetime, date, time, timedelta - -from kombu import Queue - -from celery.utils import ( - chunks, - is_iterable, - cached_property, - warn_deprecated, - worker_direct, - gen_task_name, - jsonify, -) -from celery.tests.case import Case, Mock, patch - - -def double(x): - return x * 2 - - -class test_worker_direct(Case): - - def test_returns_if_queue(self): - q = Queue('foo') - self.assertIs(worker_direct(q), q) - - -class test_gen_task_name(Case): - - def test_no_module(self): - app = Mock() - app.name == '__main__' - self.assertTrue(gen_task_name(app, 'foo', 'axsadaewe')) - - -class test_jsonify(Case): - - def test_simple(self): - self.assertTrue(jsonify(Queue('foo'))) - self.assertTrue(jsonify(['foo', 'bar', 'baz'])) - self.assertTrue(jsonify({'foo': 'bar'})) - self.assertTrue(jsonify(datetime.utcnow())) - self.assertTrue(jsonify(datetime.utcnow().replace(tzinfo=pytz.utc))) - self.assertTrue(jsonify(datetime.utcnow().replace(microsecond=0))) - self.assertTrue(jsonify(date(2012, 1, 1))) - self.assertTrue(jsonify(time(hour=1, minute=30))) - self.assertTrue(jsonify(time(hour=1, minute=30, microsecond=3))) - self.assertTrue(jsonify(timedelta(seconds=30))) - self.assertTrue(jsonify(10)) - self.assertTrue(jsonify(10.3)) - self.assertTrue(jsonify('hello')) - - with self.assertRaises(ValueError): - jsonify(object()) - - -class test_chunks(Case): - - def test_chunks(self): - - # n == 2 - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]], - ) - - # n == 3 - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) - self.assertListEqual( - list(x), - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]], - ) - - # n == 2 (exact) - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], - ) - - -class test_utils(Case): - - def test_is_iterable(self): - for a in 'f', ['f'], ('f', ), {'f': 'f'}: - self.assertTrue(is_iterable(a)) - for b in object(), 1: - self.assertFalse(is_iterable(b)) - - def test_cached_property(self): - - def fun(obj): - return fun.value - - x = cached_property(fun) - self.assertIs(x.__get__(None), x) - self.assertIs(x.__set__(None, None), x) - self.assertIs(x.__delete__(None), x) - - @patch('warnings.warn') - def test_warn_deprecated(self, warn): - warn_deprecated('Foo') - self.assertTrue(warn.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py deleted file mode 100644 index e61b330..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py +++ /dev/null @@ -1,328 +0,0 @@ -from __future__ import absolute_import - -import errno -import select -import sys - -from time import time - -from celery.worker import autoreload -from celery.worker.autoreload import ( - WorkerComponent, - file_hash, - BaseMonitor, - StatMonitor, - KQueueMonitor, - InotifyMonitor, - default_implementation, - Autoreloader, -) - -from celery.tests.case import AppCase, Case, Mock, SkipTest, patch, mock_open - - -class test_WorkerComponent(AppCase): - - def test_create_threaded(self): - w = Mock() - w.use_eventloop = False - x = WorkerComponent(w) - x.instantiate = Mock() - r = x.create(w) - x.instantiate.assert_called_with(w.autoreloader_cls, w) - self.assertIs(r, w.autoreloader) - - @patch('select.kevent', create=True) - @patch('select.kqueue', create=True) - @patch('kombu.utils.eventio.kqueue') - def test_create_ev(self, kq, kqueue, kevent): - w = Mock() - w.use_eventloop = True - x = WorkerComponent(w) - x.instantiate = Mock() - r = x.create(w) - x.instantiate.assert_called_with(w.autoreloader_cls, w) - x.register_with_event_loop(w, w.hub) - self.assertIsNone(r) - w.hub.on_close.add.assert_called_with( - w.autoreloader.on_event_loop_close, - ) - - -class test_file_hash(Case): - - def test_hash(self): - with mock_open() as a: - a.write('the quick brown fox\n') - a.seek(0) - A = file_hash('foo') - with mock_open() as b: - b.write('the quick brown bar\n') - b.seek(0) - B = file_hash('bar') - self.assertNotEqual(A, B) - - -class test_BaseMonitor(Case): - - def test_start_stop_on_change(self): - x = BaseMonitor(['a', 'b']) - - with self.assertRaises(NotImplementedError): - x.start() - x.stop() - x.on_change([]) - x._on_change = Mock() - x.on_change('foo') - x._on_change.assert_called_with('foo') - - -class test_StatMonitor(Case): - - @patch('os.stat') - def test_start(self, stat): - - class st(object): - st_mtime = time() - stat.return_value = st() - x = StatMonitor(['a', 'b']) - - def on_is_set(): - if x.shutdown_event.is_set.call_count > 3: - return True - return False - x.shutdown_event = Mock() - x.shutdown_event.is_set.side_effect = on_is_set - - x.start() - x.shutdown_event = Mock() - stat.side_effect = OSError() - x.start() - - @patch('os.stat') - def test_mtime_stat_raises(self, stat): - stat.side_effect = ValueError() - x = StatMonitor(['a', 'b']) - x._mtime('a') - - -class test_KQueueMonitor(Case): - - @patch('select.kqueue', create=True) - @patch('os.close') - def test_stop(self, close, kqueue): - x = KQueueMonitor(['a', 'b']) - x.poller = Mock() - x.filemap['a'] = 10 - x.stop() - x.poller.close.assert_called_with() - close.assert_called_with(10) - - close.side_effect = OSError() - close.side_effect.errno = errno.EBADF - x.stop() - - def test_register_with_event_loop(self): - from kombu.utils import eventio - if eventio.kqueue is None: - raise SkipTest('version of kombu does not work with pypy') - x = KQueueMonitor(['a', 'b']) - hub = Mock(name='hub') - x.add_events = Mock(name='add_events()') - x.register_with_event_loop(hub) - x.add_events.assert_called_with(x._kq) - self.assertEqual( - x._kq.on_file_change, - x.handle_event, - ) - - def test_on_event_loop_close(self): - x = KQueueMonitor(['a', 'b']) - x.close = Mock() - x._kq = Mock(name='_kq') - x.on_event_loop_close(Mock(name='hub')) - x.close.assert_called_with(x._kq) - - def test_handle_event(self): - x = KQueueMonitor(['a', 'b']) - x.on_change = Mock() - eA = Mock() - eA.ident = 'a' - eB = Mock() - eB.ident = 'b' 
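# [editor's aside] The autoreload tests above pin down celery's three file
# monitors: StatMonitor (portable os.stat() mtime polling), KQueueMonitor
# (BSD kqueue) and InotifyMonitor (Linux inotify). As a hedged sketch of the
# stat-polling technique only -- not celery's actual implementation -- the
# core loop reduces to polling mtimes and reporting paths whose value moved:

import os
import time


def stat_monitor(files, on_change, interval=1.0, shutdown=lambda: False):
    """Poll the mtime of each path; report modified paths via on_change()."""
    mtimes = {}
    while not shutdown():
        modified = []
        for path in files:
            try:
                mtime = os.stat(path).st_mtime
            except OSError:
                # file vanished or is unreadable; tolerate and move on,
                # like the OSError branch exercised in test_start above.
                continue
            if mtimes.setdefault(path, mtime) != mtime:
                mtimes[path] = mtime
                modified.append(path)
        if modified:
            on_change(modified)
        time.sleep(interval)

# usage (hypothetical): run in a background thread with a real predicate,
# e.g. stat_monitor(['tasks.py'], print, shutdown=stop_event.is_set), where
# stop_event is a threading.Event set at shutdown.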
- x.fdmap = {'a': 'A', 'b': 'B'} - x.handle_event([eA, eB]) - x.on_change.assert_called_with(['A', 'B']) - - @patch('kombu.utils.eventio.kqueue', create=True) - @patch('kombu.utils.eventio.kevent', create=True) - @patch('os.open') - @patch('select.kqueue', create=True) - def test_start(self, _kq, osopen, kevent, kqueue): - from kombu.utils import eventio - prev_poll, eventio.poll = eventio.poll, kqueue - prev = {} - flags = ['KQ_FILTER_VNODE', 'KQ_EV_ADD', 'KQ_EV_ENABLE', - 'KQ_EV_CLEAR', 'KQ_NOTE_WRITE', 'KQ_NOTE_EXTEND'] - for i, flag in enumerate(flags): - prev[flag] = getattr(eventio, flag, None) - if not prev[flag]: - setattr(eventio, flag, i) - try: - kq = kqueue.return_value = Mock() - - class ev(object): - ident = 10 - filter = eventio.KQ_FILTER_VNODE - fflags = eventio.KQ_NOTE_WRITE - kq.control.return_value = [ev()] - x = KQueueMonitor(['a']) - osopen.return_value = 10 - calls = [0] - - def on_is_set(): - calls[0] += 1 - if calls[0] > 2: - return True - return False - x.shutdown_event = Mock() - x.shutdown_event.is_set.side_effect = on_is_set - x.start() - finally: - for flag in flags: - if prev[flag]: - setattr(eventio, flag, prev[flag]) - else: - delattr(eventio, flag) - eventio.poll = prev_poll - - -class test_InotifyMonitor(Case): - - @patch('celery.worker.autoreload.pyinotify') - def test_start(self, inotify): - x = InotifyMonitor(['a']) - inotify.IN_MODIFY = 1 - inotify.IN_ATTRIB = 2 - x.start() - - inotify.WatchManager.side_effect = ValueError() - with self.assertRaises(ValueError): - x.start() - x.stop() - - x._on_change = None - x.process_(Mock()) - x._on_change = Mock() - x.process_(Mock()) - self.assertTrue(x._on_change.called) - - -class test_default_implementation(Case): - - @patch('select.kqueue', create=True) - @patch('kombu.utils.eventio.kqueue', create=True) - def test_kqueue(self, kq, kqueue): - self.assertEqual(default_implementation(), 'kqueue') - - @patch('celery.worker.autoreload.pyinotify') - def test_inotify(self, pyinotify): - kq = getattr(select, 'kqueue', None) - try: - delattr(select, 'kqueue') - except AttributeError: - pass - platform, sys.platform = sys.platform, 'linux' - try: - self.assertEqual(default_implementation(), 'inotify') - ino, autoreload.pyinotify = autoreload.pyinotify, None - try: - self.assertEqual(default_implementation(), 'stat') - finally: - autoreload.pyinotify = ino - finally: - if kq: - select.kqueue = kq - sys.platform = platform - - -class test_Autoreloader(AppCase): - - def test_register_with_event_loop(self): - x = Autoreloader(Mock(), modules=[__name__]) - hub = Mock() - x._monitor = None - x.on_init = Mock() - - def se(*args, **kwargs): - x._monitor = Mock() - x.on_init.side_effect = se - - x.register_with_event_loop(hub) - x.on_init.assert_called_with() - x._monitor.register_with_event_loop.assert_called_with(hub) - - x._monitor.register_with_event_loop.reset_mock() - x.register_with_event_loop(hub) - x._monitor.register_with_event_loop.assert_called_with(hub) - - def test_on_event_loop_close(self): - x = Autoreloader(Mock(), modules=[__name__]) - hub = Mock() - x._monitor = Mock() - x.on_event_loop_close(hub) - x._monitor.on_event_loop_close.assert_called_with(hub) - x._monitor = None - x.on_event_loop_close(hub) - - @patch('celery.worker.autoreload.file_hash') - def test_start(self, fhash): - x = Autoreloader(Mock(), modules=[__name__]) - x.Monitor = Mock() - mon = x.Monitor.return_value = Mock() - mon.start.side_effect = OSError() - mon.start.side_effect.errno = errno.EINTR - x.body() - mon.start.side_effect.errno = 
errno.ENOENT - with self.assertRaises(OSError): - x.body() - mon.start.side_effect = None - x.body() - - @patch('celery.worker.autoreload.file_hash') - @patch('os.path.exists') - def test_maybe_modified(self, exists, fhash): - exists.return_value = True - fhash.return_value = 'abcd' - x = Autoreloader(Mock(), modules=[__name__]) - x._hashes = {} - x._hashes[__name__] = 'dcba' - self.assertTrue(x._maybe_modified(__name__)) - x._hashes[__name__] = 'abcd' - self.assertFalse(x._maybe_modified(__name__)) - exists.return_value = False - self.assertFalse(x._maybe_modified(__name__)) - - def test_on_change(self): - x = Autoreloader(Mock(), modules=[__name__]) - mm = x._maybe_modified = Mock(0) - mm.return_value = True - x._reload = Mock() - x.file_to_module[__name__] = __name__ - x.on_change([__name__]) - self.assertTrue(x._reload.called) - mm.return_value = False - x.on_change([__name__]) - - def test_reload(self): - x = Autoreloader(Mock(), modules=[__name__]) - x._reload([__name__]) - x.controller.reload.assert_called_with([__name__], reload=True) - - def test_stop(self): - x = Autoreloader(Mock(), modules=[__name__]) - x._monitor = None - x.stop() - x._monitor = Mock() - x.stop() - x._monitor.stop.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py deleted file mode 100644 index 45ea488..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py +++ /dev/null @@ -1,198 +0,0 @@ -from __future__ import absolute_import - -import sys - -from celery.concurrency.base import BasePool -from celery.five import monotonic -from celery.worker import state -from celery.worker import autoscale -from celery.tests.case import AppCase, Mock, patch, sleepdeprived - - -class Object(object): - pass - - -class MockPool(BasePool): - shrink_raises_exception = False - shrink_raises_ValueError = False - - def __init__(self, *args, **kwargs): - super(MockPool, self).__init__(*args, **kwargs) - self._pool = Object() - self._pool._processes = self.limit - - def grow(self, n=1): - self._pool._processes += n - - def shrink(self, n=1): - if self.shrink_raises_exception: - raise KeyError('foo') - if self.shrink_raises_ValueError: - raise ValueError('foo') - self._pool._processes -= n - - @property - def num_processes(self): - return self._pool._processes - - -class test_WorkerComponent(AppCase): - - def test_register_with_event_loop(self): - parent = Mock(name='parent') - parent.autoscale = True - parent.consumer.on_task_message = set() - w = autoscale.WorkerComponent(parent) - self.assertIsNone(parent.autoscaler) - self.assertTrue(w.enabled) - - hub = Mock(name='hub') - w.create(parent) - w.register_with_event_loop(parent, hub) - self.assertIn( - parent.autoscaler.maybe_scale, - parent.consumer.on_task_message, - ) - hub.call_repeatedly.assert_called_with( - parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, - ) - - parent.hub = hub - hub.on_init = [] - w.instantiate = Mock() - w.register_with_event_loop(parent, Mock(name='loop')) - self.assertTrue(parent.consumer.on_task_message) - - -class test_Autoscaler(AppCase): - - def setup(self): - self.pool = MockPool(3) - - def test_stop(self): - - class Scaler(autoscale.Autoscaler): - alive = True - joined = False - - def is_alive(self): - return self.alive - - def join(self, timeout=None): - self.joined = True - - worker = Mock(name='worker') - x = Scaler(self.pool, 10, 3, worker=worker) - 
x._is_stopped.set() - x.stop() - self.assertTrue(x.joined) - x.joined = False - x.alive = False - x.stop() - self.assertFalse(x.joined) - - @sleepdeprived(autoscale) - def test_body(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.body() - self.assertEqual(x.pool.num_processes, 3) - for i in range(20): - state.reserved_requests.add(i) - x.body() - x.body() - self.assertEqual(x.pool.num_processes, 10) - self.assertTrue(worker.consumer._update_prefetch_count.called) - state.reserved_requests.clear() - x.body() - self.assertEqual(x.pool.num_processes, 10) - x._last_action = monotonic() - 10000 - x.body() - self.assertEqual(x.pool.num_processes, 3) - self.assertTrue(worker.consumer._update_prefetch_count.called) - - def test_run(self): - - class Scaler(autoscale.Autoscaler): - scale_called = False - - def body(self): - self.scale_called = True - self._is_shutdown.set() - - worker = Mock(name='worker') - x = Scaler(self.pool, 10, 3, worker=worker) - x.run() - self.assertTrue(x._is_shutdown.isSet()) - self.assertTrue(x._is_stopped.isSet()) - self.assertTrue(x.scale_called) - - def test_shrink_raises_exception(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.scale_up(3) - x._last_action = monotonic() - 10000 - x.pool.shrink_raises_exception = True - x.scale_down(1) - - @patch('celery.worker.autoscale.debug') - def test_shrink_raises_ValueError(self, debug): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.scale_up(3) - x._last_action = monotonic() - 10000 - x.pool.shrink_raises_ValueError = True - x.scale_down(1) - self.assertTrue(debug.call_count) - - def test_update_and_force(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - self.assertEqual(x.processes, 3) - x.force_scale_up(5) - self.assertEqual(x.processes, 8) - x.update(5, None) - self.assertEqual(x.processes, 5) - x.force_scale_down(3) - self.assertEqual(x.processes, 2) - x.update(3, None) - self.assertEqual(x.processes, 3) - x.force_scale_down(1000) - self.assertEqual(x.min_concurrency, 0) - self.assertEqual(x.processes, 0) - x.force_scale_up(1000) - x.min_concurrency = 1 - x.force_scale_down(1) - - x.update(max=300, min=10) - x.update(max=300, min=2) - x.update(max=None, min=None) - - def test_info(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - info = x.info() - self.assertEqual(info['max'], 10) - self.assertEqual(info['min'], 3) - self.assertEqual(info['current'], 3) - - @patch('os._exit') - def test_thread_crash(self, _exit): - - class _Autoscaler(autoscale.Autoscaler): - - def body(self): - self._is_shutdown.set() - raise OSError('foo') - worker = Mock(name='worker') - x = _Autoscaler(self.pool, 10, 3, worker=worker) - - stderr = Mock() - p, sys.stderr = sys.stderr, stderr - try: - x.run() - finally: - sys.stderr = p - _exit.assert_called_with(1) - self.assertTrue(stderr.write.call_count) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py deleted file mode 100644 index 522d263..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py +++ /dev/null @@ -1,338 +0,0 @@ -from __future__ import absolute_import - -from celery import bootsteps - -from celery.tests.case import AppCase, Mock, patch - - -class test_StepFormatter(AppCase): - - def 
test_get_prefix(self): - f = bootsteps.StepFormatter() - s = Mock() - s.last = True - self.assertEqual(f._get_prefix(s), f.blueprint_prefix) - - s2 = Mock() - s2.last = False - s2.conditional = True - self.assertEqual(f._get_prefix(s2), f.conditional_prefix) - - s3 = Mock() - s3.last = s3.conditional = False - self.assertEqual(f._get_prefix(s3), '') - - def test_node(self): - f = bootsteps.StepFormatter() - f.draw_node = Mock() - step = Mock() - step.last = False - f.node(step, x=3) - f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) - - step.last = True - f.node(step, x=3) - f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) - - def test_edge(self): - f = bootsteps.StepFormatter() - f.draw_edge = Mock() - a, b = Mock(), Mock() - a.last = True - f.edge(a, b, x=6) - f.draw_edge.assert_called_with(a, b, f.edge_scheme, { - 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', - }) - - a.last = False - f.edge(a, b, x=6) - f.draw_edge.assert_called_with(a, b, f.edge_scheme, { - 'x': 6, - }) - - -class test_Step(AppCase): - - class Def(bootsteps.StartStopStep): - name = 'test_Step.Def' - - def setup(self): - self.steps = [] - - def test_blueprint_name(self, bp='test_blueprint_name'): - - class X(bootsteps.Step): - blueprint = bp - name = 'X' - self.assertEqual(X.name, 'X') - - class Y(bootsteps.Step): - name = '%s.Y' % bp - self.assertEqual(Y.name, '%s.Y' % bp) - - def test_init(self): - self.assertTrue(self.Def(self)) - - def test_create(self): - self.Def(self).create(self) - - def test_include_if(self): - x = self.Def(self) - x.enabled = True - self.assertTrue(x.include_if(self)) - - x.enabled = False - self.assertFalse(x.include_if(self)) - - def test_instantiate(self): - self.assertIsInstance(self.Def(self).instantiate(self.Def, self), - self.Def) - - def test_include_when_enabled(self): - x = self.Def(self) - x.create = Mock() - x.create.return_value = 'George' - self.assertTrue(x.include(self)) - - self.assertEqual(x.obj, 'George') - x.create.assert_called_with(self) - - def test_include_when_disabled(self): - x = self.Def(self) - x.enabled = False - x.create = Mock() - - self.assertFalse(x.include(self)) - self.assertFalse(x.create.call_count) - - def test_repr(self): - x = self.Def(self) - self.assertTrue(repr(x)) - - -class test_ConsumerStep(AppCase): - - def test_interface(self): - step = bootsteps.ConsumerStep(self) - with self.assertRaises(NotImplementedError): - step.get_consumers(self) - - def test_start_stop_shutdown(self): - consumer = Mock() - self.connection = Mock() - - class Step(bootsteps.ConsumerStep): - - def get_consumers(self, c): - return [consumer] - - step = Step(self) - self.assertEqual(step.get_consumers(self), [consumer]) - - step.start(self) - consumer.consume.assert_called_with() - step.stop(self) - consumer.cancel.assert_called_with() - - step.shutdown(self) - consumer.channel.close.assert_called_with() - - def test_start_no_consumers(self): - self.connection = Mock() - - class Step(bootsteps.ConsumerStep): - - def get_consumers(self, c): - return () - - step = Step(self) - step.start(self) - - -class test_StartStopStep(AppCase): - - class Def(bootsteps.StartStopStep): - name = 'test_StartStopStep.Def' - - def setup(self): - self.steps = [] - - def test_start__stop(self): - x = self.Def(self) - x.create = Mock() - - # include creates the underlying object and sets - # its x.obj attribute to it, as well as appending - # it to the parent.steps list. 
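# [editor's aside] The comment above states the StartStopStep contract that
# the assertions below verify: include() calls create(), stores the result
# as step.obj and appends the step to parent.steps; start()/stop() then act
# on that object. A minimal user-level sketch against that contract, modeled
# on the documented bootstep pattern (the step name, prints and app wiring
# are illustrative assumptions, not part of this diff):

from celery import Celery, bootsteps


class PingStep(bootsteps.StartStopStep):

    def create(self, worker):
        # returning self makes this step its own obj, so the overridden
        # start()/stop() below are what the worker blueprint drives.
        return self

    def start(self, worker):
        print('worker %s: PingStep started' % (worker.hostname,))

    def stop(self, worker):
        print('worker %s: PingStep stopping' % (worker.hostname,))


app = Celery('example')
app.steps['worker'].add(PingStep)  # register with the worker blueprint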
- x.include(self) - self.assertTrue(self.steps) - self.assertIs(self.steps[0], x) - - x.start(self) - x.obj.start.assert_called_with() - - x.stop(self) - x.obj.stop.assert_called_with() - - x.obj = None - self.assertIsNone(x.start(self)) - - def test_include_when_disabled(self): - x = self.Def(self) - x.enabled = False - x.include(self) - self.assertFalse(self.steps) - - def test_terminate(self): - x = self.Def(self) - x.create = Mock() - - x.include(self) - delattr(x.obj, 'terminate') - x.terminate(self) - x.obj.stop.assert_called_with() - - -class test_Blueprint(AppCase): - - class Blueprint(bootsteps.Blueprint): - name = 'test_Blueprint' - - def test_steps_added_to_unclaimed(self): - - class tnA(bootsteps.Step): - name = 'test_Blueprint.A' - - class tnB(bootsteps.Step): - name = 'test_Blueprint.B' - - class xxA(bootsteps.Step): - name = 'xx.A' - - class Blueprint(self.Blueprint): - default_steps = [tnA, tnB] - blueprint = Blueprint(app=self.app) - - self.assertIn(tnA, blueprint._all_steps()) - self.assertIn(tnB, blueprint._all_steps()) - self.assertNotIn(xxA, blueprint._all_steps()) - - def test_init(self): - blueprint = self.Blueprint(app=self.app) - self.assertIs(blueprint.app, self.app) - self.assertEqual(blueprint.name, 'test_Blueprint') - - def test_close__on_close_is_None(self): - blueprint = self.Blueprint(app=self.app) - blueprint.on_close = None - blueprint.send_all = Mock() - blueprint.close(1) - blueprint.send_all.assert_called_with( - 1, 'close', 'closing', reverse=False, - ) - - def test_send_all_with_None_steps(self): - parent = Mock() - blueprint = self.Blueprint(app=self.app) - parent.steps = [None, None, None] - blueprint.send_all(parent, 'close', 'Closing', reverse=False) - - def test_join_raises_IGNORE_ERRORS(self): - prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError, ) - try: - blueprint = self.Blueprint(app=self.app) - blueprint.shutdown_complete = Mock() - blueprint.shutdown_complete.wait.side_effect = KeyError('luke') - blueprint.join(timeout=10) - blueprint.shutdown_complete.wait.assert_called_with(timeout=10) - finally: - bootsteps.IGNORE_ERRORS = prev - - def test_connect_with(self): - - class b1s1(bootsteps.Step): - pass - - class b1s2(bootsteps.Step): - last = True - - class b2s1(bootsteps.Step): - pass - - class b2s2(bootsteps.Step): - last = True - - b1 = self.Blueprint([b1s1, b1s2], app=self.app) - b2 = self.Blueprint([b2s1, b2s2], app=self.app) - b1.apply(Mock()) - b2.apply(Mock()) - b1.connect_with(b2) - - self.assertIn(b1s1, b1.graph) - self.assertIn(b2s1, b1.graph) - self.assertIn(b2s2, b1.graph) - - self.assertTrue(repr(b1s1)) - self.assertTrue(str(b1s1)) - - def test_topsort_raises_KeyError(self): - - class Step(bootsteps.Step): - requires = ('xyxxx.fsdasewe.Unknown', ) - - b = self.Blueprint([Step], app=self.app) - b.steps = b.claim_steps() - with self.assertRaises(ImportError): - b._finalize_steps(b.steps) - Step.requires = () - - b.steps = b.claim_steps() - b._finalize_steps(b.steps) - - with patch('celery.bootsteps.DependencyGraph') as Dep: - g = Dep.return_value = Mock() - g.topsort.side_effect = KeyError('foo') - with self.assertRaises(KeyError): - b._finalize_steps(b.steps) - - def test_apply(self): - - class MyBlueprint(bootsteps.Blueprint): - name = 'test_apply' - - def modules(self): - return ['A', 'B'] - - class B(bootsteps.Step): - name = 'test_apply.B' - - class C(bootsteps.Step): - name = 'test_apply.C' - requires = [B] - - class A(bootsteps.Step): - name = 'test_apply.A' - requires = [C] - - class D(bootsteps.Step): - 
name = 'test_apply.D' - last = True - - x = MyBlueprint([A, D], app=self.app) - x.apply(self) - - self.assertIsInstance(x.order[0], B) - self.assertIsInstance(x.order[1], C) - self.assertIsInstance(x.order[2], A) - self.assertIsInstance(x.order[3], D) - self.assertIn(A, x.types) - self.assertIs(x[A.name], x.order[2]) - - def test_find_last_but_no_steps(self): - - class MyBlueprint(bootsteps.Blueprint): - name = 'qwejwioqjewoqiej' - - x = MyBlueprint(app=self.app) - x.apply(self) - self.assertIsNone(x._find_last()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py deleted file mode 100644 index b39865d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import - -# some of these are tested in test_worker, so I've only written tests -# here to complete coverage. Should move everyting to this module at some -# point [-ask] - -from celery.worker.components import ( - Queues, - Pool, -) - -from celery.tests.case import AppCase, Mock - - -class test_Queues(AppCase): - - def test_create_when_eventloop(self): - w = Mock() - w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True - q = Queues(w) - q.create(w) - self.assertIs(w.process_task, w._process_task_sem) - - -class test_Pool(AppCase): - - def test_close_terminate(self): - w = Mock() - comp = Pool(w) - pool = w.pool = Mock() - comp.close(w) - pool.close.assert_called_with() - comp.terminate(w) - pool.terminate.assert_called_with() - - w.pool = None - comp.close(w) - comp.terminate(w) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py deleted file mode 100644 index ea4f6bb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py +++ /dev/null @@ -1,512 +0,0 @@ -from __future__ import absolute_import - -import errno -import socket - -from billiard.exceptions import RestartFreqExceeded - -from celery.datastructures import LimitedSet -from celery.worker import state as worker_state -from celery.worker.consumer import ( - Consumer, - Heart, - Tasks, - Agent, - Mingle, - Gossip, - dump_body, - CLOSE, -) - -from celery.tests.case import AppCase, ContextMock, Mock, SkipTest, call, patch - - -class test_Consumer(AppCase): - - def get_consumer(self, no_hub=False, **kwargs): - consumer = Consumer( - on_task_request=Mock(), - init_callback=Mock(), - pool=Mock(), - app=self.app, - timer=Mock(), - controller=Mock(), - hub=None if no_hub else Mock(), - **kwargs - ) - consumer.blueprint = Mock() - consumer._restart_state = Mock() - consumer.connection = _amqp_connection() - consumer.connection_errors = (socket.error, OSError, ) - return consumer - - def test_taskbuckets_defaultdict(self): - c = self.get_consumer() - self.assertIsNone(c.task_buckets['fooxasdwx.wewe']) - - def test_dump_body_buffer(self): - msg = Mock() - msg.body = 'str' - try: - buf = buffer(msg.body) - except NameError: - raise SkipTest('buffer type not available') - self.assertTrue(dump_body(msg, buf)) - - def test_sets_heartbeat(self): - c = self.get_consumer(amqheartbeat=10) - self.assertEqual(c.amqheartbeat, 10) - self.app.conf.BROKER_HEARTBEAT = 20 - c = self.get_consumer(amqheartbeat=None) - self.assertEqual(c.amqheartbeat, 20) - - def test_gevent_bug_disables_connection_timeout(self): - with 
patch('celery.worker.consumer._detect_environment') as de: - de.return_value = 'gevent' - self.app.conf.BROKER_CONNECTION_TIMEOUT = 33.33 - self.get_consumer() - self.assertIsNone(self.app.conf.BROKER_CONNECTION_TIMEOUT) - - def test_limit_task(self): - c = self.get_consumer() - - with patch('celery.worker.consumer.task_reserved') as reserved: - bucket = Mock() - request = Mock() - bucket.can_consume.return_value = True - - c._limit_task(request, bucket, 3) - bucket.can_consume.assert_called_with(3) - reserved.assert_called_with(request) - c.on_task_request.assert_called_with(request) - - with patch('celery.worker.consumer.task_reserved') as reserved: - bucket.can_consume.return_value = False - bucket.expected_time.return_value = 3.33 - c._limit_task(request, bucket, 4) - bucket.can_consume.assert_called_with(4) - c.timer.call_after.assert_called_with( - 3.33, c._limit_task, (request, bucket, 4), - ) - bucket.expected_time.assert_called_with(4) - self.assertFalse(reserved.called) - - def test_start_blueprint_raises_EMFILE(self): - c = self.get_consumer() - exc = c.blueprint.start.side_effect = OSError() - exc.errno = errno.EMFILE - - with self.assertRaises(OSError): - c.start() - - def test_max_restarts_exceeded(self): - c = self.get_consumer() - - def se(*args, **kwargs): - c.blueprint.state = CLOSE - raise RestartFreqExceeded() - c._restart_state.step.side_effect = se - c.blueprint.start.side_effect = socket.error() - - with patch('celery.worker.consumer.sleep') as sleep: - c.start() - sleep.assert_called_with(1) - - def _closer(self, c): - def se(*args, **kwargs): - c.blueprint.state = CLOSE - return se - - def test_collects_at_restart(self): - c = self.get_consumer() - c.connection.collect.side_effect = MemoryError() - c.blueprint.start.side_effect = socket.error() - c.blueprint.restart.side_effect = self._closer(c) - c.start() - c.connection.collect.assert_called_with() - - def test_register_with_event_loop(self): - c = self.get_consumer() - c.register_with_event_loop(Mock(name='loop')) - - def test_on_close_clears_semaphore_timer_and_reqs(self): - with patch('celery.worker.consumer.reserved_requests') as reserved: - c = self.get_consumer() - c.on_close() - c.controller.semaphore.clear.assert_called_with() - c.timer.clear.assert_called_with() - reserved.clear.assert_called_with() - c.pool.flush.assert_called_with() - - c.controller = None - c.timer = None - c.pool = None - c.on_close() - - def test_connect_error_handler(self): - self.app.connection = _amqp_connection() - conn = self.app.connection.return_value - c = self.get_consumer() - self.assertTrue(c.connect()) - self.assertTrue(conn.ensure_connection.called) - errback = conn.ensure_connection.call_args[0][0] - conn.alt = [(1, 2, 3)] - errback(Mock(), 0) - - -class test_Heart(AppCase): - - def test_start(self): - c = Mock() - c.timer = Mock() - c.event_dispatcher = Mock() - - with patch('celery.worker.heartbeat.Heart') as hcls: - h = Heart(c) - self.assertTrue(h.enabled) - self.assertEqual(h.heartbeat_interval, None) - self.assertIsNone(c.heart) - - h.start(c) - self.assertTrue(c.heart) - hcls.assert_called_with(c.timer, c.event_dispatcher, - h.heartbeat_interval) - c.heart.start.assert_called_with() - - def test_start_heartbeat_interval(self): - c = Mock() - c.timer = Mock() - c.event_dispatcher = Mock() - - with patch('celery.worker.heartbeat.Heart') as hcls: - h = Heart(c, False, 20) - self.assertTrue(h.enabled) - self.assertEqual(h.heartbeat_interval, 20) - self.assertIsNone(c.heart) - - h.start(c) - self.assertTrue(c.heart) 
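# [editor's aside] test_limit_task above documents the consumer's rate-limit
# path: when the task's token bucket can consume, the request is reserved
# and dispatched; otherwise it is re-entered into the timer after
# bucket.expected_time() seconds. A hedged, self-contained sketch of that
# technique using the same primitives (the submit/dispatch/defer callables
# are illustrative helpers, not celery's API):

from kombu.utils.limits import TokenBucket
from celery.utils.timeutils import rate

bucket = TokenBucket(rate('100/m'), capacity=1)  # at most 100 tasks/minute


def submit(request, dispatch, defer):
    if bucket.can_consume(1):
        dispatch(request)                # within the limit: run now
    else:
        delay = bucket.expected_time(1)  # seconds until a token is free
        # retry later, mirroring timer.call_after(3.33, c._limit_task, ...)
        # asserted in test_limit_task above
        defer(delay, submit, (request, dispatch, defer))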
- hcls.assert_called_with(c.timer, c.event_dispatcher, - h.heartbeat_interval) - c.heart.start.assert_called_with() - - -class test_Tasks(AppCase): - - def test_stop(self): - c = Mock() - tasks = Tasks(c) - self.assertIsNone(c.task_consumer) - self.assertIsNone(c.qos) - - c.task_consumer = Mock() - tasks.stop(c) - - def test_stop_already_stopped(self): - c = Mock() - tasks = Tasks(c) - tasks.stop(c) - - -class test_Agent(AppCase): - - def test_start(self): - c = Mock() - agent = Agent(c) - agent.instantiate = Mock() - agent.agent_cls = 'foo:Agent' - self.assertIsNotNone(agent.create(c)) - agent.instantiate.assert_called_with(agent.agent_cls, c.connection) - - -class test_Mingle(AppCase): - - def test_start_no_replies(self): - c = Mock() - c.app.connection = _amqp_connection() - mingle = Mingle(c) - I = c.app.control.inspect.return_value = Mock() - I.hello.return_value = {} - mingle.start(c) - - def test_start(self): - try: - c = Mock() - c.app.connection = _amqp_connection() - mingle = Mingle(c) - self.assertTrue(mingle.enabled) - - Aig = LimitedSet() - Big = LimitedSet() - Aig.add('Aig-1') - Aig.add('Aig-2') - Big.add('Big-1') - - I = c.app.control.inspect.return_value = Mock() - I.hello.return_value = { - 'A@example.com': { - 'clock': 312, - 'revoked': Aig._data, - }, - 'B@example.com': { - 'clock': 29, - 'revoked': Big._data, - }, - 'C@example.com': { - 'error': 'unknown method', - }, - } - - mingle.start(c) - I.hello.assert_called_with(c.hostname, worker_state.revoked._data) - c.app.clock.adjust.assert_has_calls([ - call(312), call(29), - ], any_order=True) - self.assertIn('Aig-1', worker_state.revoked) - self.assertIn('Aig-2', worker_state.revoked) - self.assertIn('Big-1', worker_state.revoked) - finally: - worker_state.revoked.clear() - - -def _amqp_connection(): - connection = ContextMock() - connection.return_value = ContextMock() - connection.return_value.transport.driver_type = 'amqp' - return connection - - -class test_Gossip(AppCase): - - def test_init(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - self.assertTrue(g.enabled) - self.assertIs(c.gossip, g) - - def test_callbacks(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - on_node_join = Mock(name='on_node_join') - on_node_join2 = Mock(name='on_node_join2') - on_node_leave = Mock(name='on_node_leave') - on_node_lost = Mock(name='on.node_lost') - g.on.node_join.add(on_node_join) - g.on.node_join.add(on_node_join2) - g.on.node_leave.add(on_node_leave) - g.on.node_lost.add(on_node_lost) - - worker = Mock(name='worker') - g.on_node_join(worker) - on_node_join.assert_called_with(worker) - on_node_join2.assert_called_with(worker) - g.on_node_leave(worker) - on_node_leave.assert_called_with(worker) - g.on_node_lost(worker) - on_node_lost.assert_called_with(worker) - - def test_election(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - g.election('id', 'topic', 'action') - self.assertListEqual(g.consensus_replies['id'], []) - g.dispatcher.send.assert_called_with( - 'worker-elect', id='id', topic='topic', cver=1, action='action', - ) - - def test_call_task(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - - with patch('celery.worker.consumer.signature') as signature: - sig = signature.return_value = Mock() - task = Mock() - g.call_task(task) - signature.assert_called_with(task, app=c.app) - sig.apply_async.assert_called_with() - - sig.apply_async.side_effect = 
MemoryError() - with patch('celery.worker.consumer.error') as error: - g.call_task(task) - self.assertTrue(error.called) - - def Event(self, id='id', clock=312, - hostname='foo@example.com', pid=4312, - topic='topic', action='action', cver=1): - return { - 'id': id, - 'clock': clock, - 'hostname': hostname, - 'pid': pid, - 'topic': topic, - 'action': action, - 'cver': cver, - } - - def test_on_elect(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - - event = self.Event('id1') - g.on_elect(event) - in_heap = g.consensus_requests['id1'] - self.assertTrue(in_heap) - g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') - - event.pop('clock') - with patch('celery.worker.consumer.error') as error: - g.on_elect(event) - self.assertTrue(error.called) - - def Consumer(self, hostname='foo@x.com', pid=4312): - c = Mock() - c.app.connection = _amqp_connection() - c.hostname = hostname - c.pid = pid - return c - - def setup_election(self, g, c): - g.start(c) - g.clock = self.app.clock - self.assertNotIn('idx', g.consensus_replies) - self.assertIsNone(g.on_elect_ack({'id': 'idx'})) - - g.state.alive_workers.return_value = [ - 'foo@x.com', 'bar@x.com', 'baz@x.com', - ] - g.consensus_replies['id1'] = [] - g.consensus_requests['id1'] = [] - e1 = self.Event('id1', 1, 'foo@x.com') - e2 = self.Event('id1', 2, 'bar@x.com') - e3 = self.Event('id1', 3, 'baz@x.com') - g.on_elect(e1) - g.on_elect(e2) - g.on_elect(e3) - self.assertEqual(len(g.consensus_requests['id1']), 3) - - with patch('celery.worker.consumer.info'): - g.on_elect_ack(e1) - self.assertEqual(len(g.consensus_replies['id1']), 1) - g.on_elect_ack(e2) - self.assertEqual(len(g.consensus_replies['id1']), 2) - g.on_elect_ack(e3) - with self.assertRaises(KeyError): - g.consensus_replies['id1'] - - def test_on_elect_ack_win(self): - c = self.Consumer(hostname='foo@x.com') # I will win - g = Gossip(c) - handler = g.election_handlers['topic'] = Mock() - self.setup_election(g, c) - handler.assert_called_with('action') - - def test_on_elect_ack_lose(self): - c = self.Consumer(hostname='bar@x.com') # I will lose - c.app.connection = _amqp_connection() - g = Gossip(c) - handler = g.election_handlers['topic'] = Mock() - self.setup_election(g, c) - self.assertFalse(handler.called) - - def test_on_elect_ack_win_but_no_action(self): - c = self.Consumer(hostname='foo@x.com') # I will win - g = Gossip(c) - g.election_handlers = {} - with patch('celery.worker.consumer.error') as error: - self.setup_election(g, c) - self.assertTrue(error.called) - - def test_on_node_join(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.debug') as debug: - g.on_node_join(c) - debug.assert_called_with('%s joined the party', 'foo@x.com') - - def test_on_node_leave(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.debug') as debug: - g.on_node_leave(c) - debug.assert_called_with('%s left', 'foo@x.com') - - def test_on_node_lost(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.info') as info: - g.on_node_lost(c) - info.assert_called_with('missed heartbeat from %s', 'foo@x.com') - - def test_register_timer(self): - c = self.Consumer() - g = Gossip(c) - g.register_timer() - c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) - tref = g._tref - g.register_timer() - tref.cancel.assert_called_with() - - def test_periodic(self): - c = self.Consumer() - g = Gossip(c) - g.on_node_lost = Mock() - state = g.state = Mock() - 
worker = Mock() - state.workers = {'foo': worker} - worker.alive = True - worker.hostname = 'foo' - g.periodic() - - worker.alive = False - g.periodic() - g.on_node_lost.assert_called_with(worker) - with self.assertRaises(KeyError): - state.workers['foo'] - - def test_on_message(self): - c = self.Consumer() - g = Gossip(c) - self.assertTrue(g.enabled) - prepare = Mock() - prepare.return_value = 'worker-online', {} - c.app.events.State.assert_called_with( - on_node_join=g.on_node_join, - on_node_leave=g.on_node_leave, - max_tasks_in_memory=1, - ) - g.update_state = Mock() - worker = Mock() - g.on_node_join = Mock() - g.on_node_leave = Mock() - g.update_state.return_value = worker, 1 - message = Mock() - message.delivery_info = {'routing_key': 'worker-online'} - message.headers = {'hostname': 'other'} - - handler = g.event_handlers['worker-online'] = Mock() - g.on_message(prepare, message) - handler.assert_called_with(message.payload) - g.event_handlers = {} - - g.on_message(prepare, message) - - message.delivery_info = {'routing_key': 'worker-offline'} - prepare.return_value = 'worker-offline', {} - g.on_message(prepare, message) - - message.delivery_info = {'routing_key': 'worker-baz'} - prepare.return_value = 'worker-baz', {} - g.update_state.return_value = worker, 0 - g.on_message(prepare, message) - - message.headers = {'hostname': g.hostname} - g.on_message(prepare, message) - g.clock.forward.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py deleted file mode 100644 index 86bf550..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py +++ /dev/null @@ -1,601 +0,0 @@ -from __future__ import absolute_import - -import sys -import socket - -from collections import defaultdict -from datetime import datetime, timedelta - -from kombu import pidbox - -from celery.datastructures import AttributeDict -from celery.five import Queue as FastQueue -from celery.utils import uuid -from celery.utils.timer2 import Timer -from celery.worker import WorkController as _WC -from celery.worker import consumer -from celery.worker import control -from celery.worker import state as worker_state -from celery.worker.job import Request -from celery.worker.state import revoked -from celery.worker.control import Panel -from celery.worker.pidbox import Pidbox, gPidbox - -from celery.tests.case import AppCase, Mock, call, patch - -hostname = socket.gethostname() - - -class WorkController(object): - autoscaler = None - - def stats(self): - return {'total': worker_state.total_count} - - -class Consumer(consumer.Consumer): - - def __init__(self, app): - self.app = app - self.buffer = FastQueue() - self.handle_task = self.buffer.put - self.timer = Timer() - self.event_dispatcher = Mock() - self.controller = WorkController() - self.task_consumer = Mock() - self.prefetch_multiplier = 1 - self.initial_prefetch_count = 1 - - from celery.concurrency.base import BasePool - self.pool = BasePool(10) - self.task_buckets = defaultdict(lambda: None) - - -class test_Pidbox(AppCase): - - def test_shutdown(self): - with patch('celery.worker.pidbox.ignore_errors') as eig: - parent = Mock() - pbox = Pidbox(parent) - pbox._close_channel = Mock() - self.assertIs(pbox.c, parent) - pconsumer = pbox.consumer = Mock() - cancel = pconsumer.cancel - pbox.shutdown(parent) - eig.assert_called_with(parent, cancel) - pbox._close_channel.assert_called_with(parent) - - -class 
test_Pidbox_green(AppCase): - - def test_stop(self): - parent = Mock() - g = gPidbox(parent) - stopped = g._node_stopped = Mock() - shutdown = g._node_shutdown = Mock() - close_chan = g._close_channel = Mock() - - g.stop(parent) - shutdown.set.assert_called_with() - stopped.wait.assert_called_with() - close_chan.assert_called_with(parent) - self.assertIsNone(g._node_stopped) - self.assertIsNone(g._node_shutdown) - - close_chan.reset() - g.stop(parent) - close_chan.assert_called_with(parent) - - def test_resets(self): - parent = Mock() - g = gPidbox(parent) - g._resets = 100 - g.reset() - self.assertEqual(g._resets, 101) - - def test_loop(self): - parent = Mock() - conn = parent.connect.return_value = self.app.connection() - drain = conn.drain_events = Mock() - g = gPidbox(parent) - parent.connection = Mock() - do_reset = g._do_reset = Mock() - - call_count = [0] - - def se(*args, **kwargs): - if call_count[0] > 2: - g._node_shutdown.set() - g.reset() - call_count[0] += 1 - drain.side_effect = se - g.loop(parent) - - self.assertEqual(do_reset.call_count, 4) - - -class test_ControlPanel(AppCase): - - def setup(self): - self.panel = self.create_panel(consumer=Consumer(self.app)) - - @self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False) - def mytask(): - pass - self.mytask = mytask - - def create_state(self, **kwargs): - kwargs.setdefault('app', self.app) - kwargs.setdefault('hostname', hostname) - return AttributeDict(kwargs) - - def create_panel(self, **kwargs): - return self.app.control.mailbox.Node(hostname=hostname, - state=self.create_state(**kwargs), - handlers=Panel.data) - - def test_enable_events(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - evd = consumer.event_dispatcher - evd.groups = set() - panel.handle('enable_events') - self.assertFalse(evd.groups) - evd.groups = set(['worker']) - panel.handle('enable_events') - self.assertIn('task', evd.groups) - evd.groups = set(['task']) - self.assertIn('already enabled', panel.handle('enable_events')['ok']) - - def test_disable_events(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - evd = consumer.event_dispatcher - evd.enabled = True - evd.groups = set(['task']) - panel.handle('disable_events') - self.assertNotIn('task', evd.groups) - self.assertIn('already disabled', panel.handle('disable_events')['ok']) - - def test_clock(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - panel.state.app.clock.value = 313 - x = panel.handle('clock') - self.assertEqual(x['clock'], 313) - - def test_hello(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - panel.state.app.clock.value = 313 - worker_state.revoked.add('revoked1') - try: - x = panel.handle('hello', {'from_node': 'george@vandelay.com'}) - self.assertIn('revoked1', x['revoked']) - self.assertEqual(x['clock'], 314) # incremented - finally: - worker_state.revoked.discard('revoked1') - - def test_conf(self): - return - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - self.app.conf.SOME_KEY6 = 'hello world' - x = panel.handle('dump_conf') - self.assertIn('SOME_KEY6', x) - - def test_election(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - consumer.gossip = Mock() - panel.handle( - 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, - ) - consumer.gossip.election.assert_called_with('id', 'topic', 'action') - - def test_heartbeat(self): - consumer = 
Consumer(self.app) - panel = self.create_panel(consumer=consumer) - consumer.event_dispatcher.enabled = True - panel.handle('heartbeat') - self.assertIn(('worker-heartbeat', ), - consumer.event_dispatcher.send.call_args) - - def test_time_limit(self): - panel = self.create_panel(consumer=Mock()) - r = panel.handle('time_limit', arguments=dict( - task_name=self.mytask.name, hard=30, soft=10)) - self.assertEqual( - (self.mytask.time_limit, self.mytask.soft_time_limit), - (30, 10), - ) - self.assertIn('ok', r) - r = panel.handle('time_limit', arguments=dict( - task_name=self.mytask.name, hard=None, soft=None)) - self.assertEqual( - (self.mytask.time_limit, self.mytask.soft_time_limit), - (None, None), - ) - self.assertIn('ok', r) - - r = panel.handle('time_limit', arguments=dict( - task_name='248e8afya9s8dh921eh928', hard=30)) - self.assertIn('error', r) - - def test_active_queues(self): - import kombu - - x = kombu.Consumer(self.app.connection(), - [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), - kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], - auto_declare=False) - consumer = Mock() - consumer.task_consumer = x - panel = self.create_panel(consumer=consumer) - r = panel.handle('active_queues') - self.assertListEqual(list(sorted(q['name'] for q in r)), - ['bar', 'foo']) - - def test_dump_tasks(self): - info = '\n'.join(self.panel.handle('dump_tasks')) - self.assertIn('mytask', info) - self.assertIn('rate_limit=200', info) - - def test_stats(self): - prev_count, worker_state.total_count = worker_state.total_count, 100 - try: - self.assertDictContainsSubset({'total': 100}, - self.panel.handle('stats')) - finally: - worker_state.total_count = prev_count - - def test_report(self): - self.panel.handle('report') - - def test_active(self): - r = Request({ - 'task': self.mytask.name, - 'id': 'do re mi', - 'args': (), - 'kwargs': {}, - }, app=self.app) - worker_state.active_requests.add(r) - try: - self.assertTrue(self.panel.handle('dump_active')) - finally: - worker_state.active_requests.discard(r) - - def test_pool_grow(self): - - class MockPool(object): - - def __init__(self, size=1): - self.size = size - - def grow(self, n=1): - self.size += n - - def shrink(self, n=1): - self.size -= n - - @property - def num_processes(self): - return self.size - - consumer = Consumer(self.app) - consumer.prefetch_multiplier = 8 - consumer.qos = Mock(name='qos') - consumer.pool = MockPool(1) - panel = self.create_panel(consumer=consumer) - - panel.handle('pool_grow') - self.assertEqual(consumer.pool.size, 2) - consumer.qos.increment_eventually.assert_called_with(8) - self.assertEqual(consumer.initial_prefetch_count, 16) - panel.handle('pool_shrink') - self.assertEqual(consumer.pool.size, 1) - consumer.qos.decrement_eventually.assert_called_with(8) - self.assertEqual(consumer.initial_prefetch_count, 8) - - panel.state.consumer = Mock() - panel.state.consumer.controller = Mock() - sc = panel.state.consumer.controller.autoscaler = Mock() - panel.handle('pool_grow') - self.assertTrue(sc.force_scale_up.called) - panel.handle('pool_shrink') - self.assertTrue(sc.force_scale_down.called) - - def test_add__cancel_consumer(self): - - class MockConsumer(object): - queues = [] - canceled = [] - consuming = False - - def add_queue(self, queue): - self.queues.append(queue.name) - - def consume(self): - self.consuming = True - - def cancel_by_queue(self, queue): - self.canceled.append(queue) - - def consuming_from(self, queue): - return queue in self.queues - - consumer = Consumer(self.app) - consumer.task_consumer = 
MockConsumer() - panel = self.create_panel(consumer=consumer) - - panel.handle('add_consumer', {'queue': 'MyQueue'}) - self.assertIn('MyQueue', consumer.task_consumer.queues) - self.assertTrue(consumer.task_consumer.consuming) - panel.handle('add_consumer', {'queue': 'MyQueue'}) - panel.handle('cancel_consumer', {'queue': 'MyQueue'}) - self.assertIn('MyQueue', consumer.task_consumer.canceled) - - def test_revoked(self): - worker_state.revoked.clear() - worker_state.revoked.add('a1') - worker_state.revoked.add('a2') - - try: - self.assertEqual(sorted(self.panel.handle('dump_revoked')), - ['a1', 'a2']) - finally: - worker_state.revoked.clear() - - def test_dump_schedule(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - self.assertFalse(panel.handle('dump_schedule')) - r = Request({ - 'task': self.mytask.name, - 'id': 'CAFEBABE', - 'args': (), - 'kwargs': {}, - }, app=self.app) - consumer.timer.schedule.enter_at( - consumer.timer.Entry(lambda x: x, (r, )), - datetime.now() + timedelta(seconds=10)) - consumer.timer.schedule.enter_at( - consumer.timer.Entry(lambda x: x, (object(), )), - datetime.now() + timedelta(seconds=10)) - self.assertTrue(panel.handle('dump_schedule')) - - def test_dump_reserved(self): - consumer = Consumer(self.app) - worker_state.reserved_requests.add(Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': (2, 2), - 'kwargs': {}, - }, app=self.app)) - try: - panel = self.create_panel(consumer=consumer) - response = panel.handle('dump_reserved', {'safe': True}) - self.assertDictContainsSubset( - {'name': self.mytask.name, - 'args': (2, 2), - 'kwargs': {}, - 'hostname': socket.gethostname()}, - response[0], - ) - worker_state.reserved_requests.clear() - self.assertFalse(panel.handle('dump_reserved')) - finally: - worker_state.reserved_requests.clear() - - def test_rate_limit_invalid_rate_limit_string(self): - e = self.panel.handle('rate_limit', arguments=dict( - task_name='tasks.add', rate_limit='x1240301#%!')) - self.assertIn('Invalid rate limit string', e.get('error')) - - def test_rate_limit(self): - - class xConsumer(object): - reset = False - - def reset_rate_limits(self): - self.reset = True - - consumer = xConsumer() - panel = self.create_panel(app=self.app, consumer=consumer) - - task = self.app.tasks[self.mytask.name] - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit='100/m')) - self.assertEqual(task.rate_limit, '100/m') - self.assertTrue(consumer.reset) - consumer.reset = False - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit=0)) - self.assertEqual(task.rate_limit, 0) - self.assertTrue(consumer.reset) - - def test_rate_limit_nonexistant_task(self): - self.panel.handle('rate_limit', arguments={ - 'task_name': 'xxxx.does.not.exist', - 'rate_limit': '1000/s'}) - - def test_unexposed_command(self): - with self.assertRaises(KeyError): - self.panel.handle('foo', arguments={}) - - def test_revoke_with_name(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 'arguments': {'task_id': tid, - 'task_name': self.mytask.name}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - def test_revoke_with_name_not_in_registry(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 'arguments': {'task_id': tid, - 'task_name': 'xxxxxxxxx33333333388888'}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - def test_revoke(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 
'arguments': {'task_id': tid}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - m = {'method': 'revoke', - 'destination': 'does.not.exist', - 'arguments': {'task_id': tid + 'xxx'}} - self.panel.handle_message(m, None) - self.assertNotIn(tid + 'xxx', revoked) - - def test_revoke_terminate(self): - request = Mock() - request.id = tid = uuid() - worker_state.reserved_requests.add(request) - try: - r = control.revoke(Mock(), tid, terminate=True) - self.assertIn(tid, revoked) - self.assertTrue(request.terminate.call_count) - self.assertIn('terminate:', r['ok']) - # unknown task id only revokes - r = control.revoke(Mock(), uuid(), terminate=True) - self.assertIn('tasks unknown', r['ok']) - finally: - worker_state.reserved_requests.discard(request) - - def test_autoscale(self): - self.panel.state.consumer = Mock() - self.panel.state.consumer.controller = Mock() - sc = self.panel.state.consumer.controller.autoscaler = Mock() - sc.update.return_value = 10, 2 - m = {'method': 'autoscale', - 'destination': hostname, - 'arguments': {'max': '10', 'min': '2'}} - r = self.panel.handle_message(m, None) - self.assertIn('ok', r) - - self.panel.state.consumer.controller.autoscaler = None - r = self.panel.handle_message(m, None) - self.assertIn('error', r) - - def test_ping(self): - m = {'method': 'ping', - 'destination': hostname} - r = self.panel.handle_message(m, None) - self.assertEqual(r, {'ok': 'pong'}) - - def test_shutdown(self): - m = {'method': 'shutdown', - 'destination': hostname} - with self.assertRaises(SystemExit): - self.panel.handle_message(m, None) - - def test_panel_reply(self): - - replies = [] - - class _Node(pidbox.Node): - - def reply(self, data, exchange, routing_key, **kwargs): - replies.append(data) - - panel = _Node(hostname=hostname, - state=self.create_state(consumer=Consumer(self.app)), - handlers=Panel.data, - mailbox=self.app.control.mailbox) - r = panel.dispatch('ping', reply_to={'exchange': 'x', - 'routing_key': 'x'}) - self.assertEqual(r, {'ok': 'pong'}) - self.assertDictEqual(replies[0], {panel.hostname: {'ok': 'pong'}}) - - def test_pool_restart(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - consumer.event_dispatcher = Mock(name='evd') - panel = self.create_panel(consumer=consumer) - assert panel.state.consumer.controller.consumer is consumer - panel.app = self.app - _import = panel.app.loader.import_from_cwd = Mock() - _reload = Mock() - - with self.assertRaises(ValueError): - panel.handle('pool_restart', {'reloader': _reload}) - - self.app.conf.CELERYD_POOL_RESTARTS = True - panel.handle('pool_restart', {'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - consumer.reset_rate_limits.assert_called_with() - consumer.update_strategies.assert_called_with() - self.assertFalse(_reload.called) - self.assertFalse(_import.called) - - def test_pool_restart_import_modules(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - panel = self.create_panel(consumer=consumer) - panel.app = self.app - assert panel.state.consumer.controller.consumer is 
consumer - _import = consumer.controller.app.loader.import_from_cwd = Mock() - _reload = Mock() - - self.app.conf.CELERYD_POOL_RESTARTS = True - panel.handle('pool_restart', {'modules': ['foo', 'bar'], - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - consumer.reset_rate_limits.assert_called_with() - consumer.update_strategies.assert_called_with() - self.assertFalse(_reload.called) - self.assertItemsEqual( - [call('bar'), call('foo')], - _import.call_args_list, - ) - - def test_pool_restart_reload_modules(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - panel = self.create_panel(consumer=consumer) - panel.app = self.app - _import = panel.app.loader.import_from_cwd = Mock() - _reload = Mock() - - self.app.conf.CELERYD_POOL_RESTARTS = True - with patch.dict(sys.modules, {'foo': None}): - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': False, - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - self.assertFalse(_reload.called) - self.assertFalse(_import.called) - - _import.reset_mock() - _reload.reset_mock() - consumer.controller.pool.restart.reset_mock() - - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': True, - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - self.assertTrue(_reload.called) - self.assertFalse(_import.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py deleted file mode 100644 index 50559ca..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -from celery.worker.heartbeat import Heart -from celery.tests.case import AppCase - - -class MockDispatcher(object): - heart = None - next_iter = 0 - - def __init__(self): - self.sent = [] - self.on_enabled = set() - self.on_disabled = set() - self.enabled = True - - def send(self, msg, **_fields): - self.sent.append(msg) - if self.heart: - if self.next_iter > 10: - self.heart._shutdown.set() - self.next_iter += 1 - - -class MockDispatcherRaising(object): - - def send(self, msg): - if msg == 'worker-offline': - raise Exception('foo') - - -class MockTimer(object): - - def call_repeatedly(self, secs, fun, args=(), kwargs={}): - - class entry(tuple): - canceled = False - - def cancel(self): - self.canceled = True - - return entry((secs, fun, args, kwargs)) - - def cancel(self, entry): - entry.cancel() - - -class test_Heart(AppCase): - - def test_start_stop(self): - timer = MockTimer() - eventer = MockDispatcher() - h = Heart(timer, eventer, interval=1) - h.start() - self.assertTrue(h.tref) - h.stop() - self.assertIsNone(h.tref) - h.stop() - - def test_start_when_disabled(self): - timer = MockTimer() - eventer = MockDispatcher() - eventer.enabled = False - h = Heart(timer, eventer) - h.start() - self.assertFalse(h.tref) - - def test_stop_when_disabled(self): - timer = MockTimer() - eventer = MockDispatcher() - eventer.enabled = False - h = Heart(timer, eventer) - h.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py deleted 
file mode 100644 index e84abf3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py +++ /dev/null @@ -1,342 +0,0 @@ -from __future__ import absolute_import - -from kombu.async import Hub, READ, WRITE, ERR -from kombu.async.debug import callback_for, repr_flag, _rcb -from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore - -from celery.five import range -from celery.tests.case import Case, Mock, call, patch - - -class File(object): - - def __init__(self, fd): - self.fd = fd - - def fileno(self): - return self.fd - - def __eq__(self, other): - if isinstance(other, File): - return self.fd == other.fd - return NotImplemented - - def __hash__(self): - return hash(self.fd) - - -class test_DummyLock(Case): - - def test_context(self): - mutex = DummyLock() - with mutex: - pass - - -class test_LaxBoundedSemaphore(Case): - - def test_acquire_release(self): - x = LaxBoundedSemaphore(2) - - c1 = Mock() - x.acquire(c1, 1) - self.assertEqual(x.value, 1) - c1.assert_called_with(1) - - c2 = Mock() - x.acquire(c2, 2) - self.assertEqual(x.value, 0) - c2.assert_called_with(2) - - c3 = Mock() - x.acquire(c3, 3) - self.assertEqual(x.value, 0) - self.assertFalse(c3.called) - - x.release() - self.assertEqual(x.value, 0) - x.release() - self.assertEqual(x.value, 1) - x.release() - self.assertEqual(x.value, 2) - c3.assert_called_with(3) - - def test_bounded(self): - x = LaxBoundedSemaphore(2) - for i in range(100): - x.release() - self.assertEqual(x.value, 2) - - def test_grow_shrink(self): - x = LaxBoundedSemaphore(1) - self.assertEqual(x.initial_value, 1) - cb1 = Mock() - x.acquire(cb1, 1) - cb1.assert_called_with(1) - self.assertEqual(x.value, 0) - - cb2 = Mock() - x.acquire(cb2, 2) - self.assertFalse(cb2.called) - self.assertEqual(x.value, 0) - - cb3 = Mock() - x.acquire(cb3, 3) - self.assertFalse(cb3.called) - - x.grow(2) - cb2.assert_called_with(2) - cb3.assert_called_with(3) - self.assertEqual(x.value, 2) - self.assertEqual(x.initial_value, 3) - - self.assertFalse(x._waiting) - x.grow(3) - for i in range(x.initial_value): - self.assertTrue(x.acquire(Mock())) - self.assertFalse(x.acquire(Mock())) - x.clear() - - x.shrink(3) - for i in range(x.initial_value): - self.assertTrue(x.acquire(Mock())) - self.assertFalse(x.acquire(Mock())) - self.assertEqual(x.value, 0) - - for i in range(100): - x.release() - self.assertEqual(x.value, x.initial_value) - - def test_clear(self): - x = LaxBoundedSemaphore(10) - for i in range(11): - x.acquire(Mock()) - self.assertTrue(x._waiting) - self.assertEqual(x.value, 0) - - x.clear() - self.assertFalse(x._waiting) - self.assertEqual(x.value, x.initial_value) - - -class test_Hub(Case): - - def test_repr_flag(self): - self.assertEqual(repr_flag(READ), 'R') - self.assertEqual(repr_flag(WRITE), 'W') - self.assertEqual(repr_flag(ERR), '!') - self.assertEqual(repr_flag(READ | WRITE), 'RW') - self.assertEqual(repr_flag(READ | ERR), 'R!') - self.assertEqual(repr_flag(WRITE | ERR), 'W!') - self.assertEqual(repr_flag(READ | WRITE | ERR), 'RW!') - - def test_repr_callback_rcb(self): - - def f(): - pass - - self.assertEqual(_rcb(f), f.__name__) - self.assertEqual(_rcb('foo'), 'foo') - - @patch('kombu.async.hub.poll') - def test_start_stop(self, poll): - hub = Hub() - poll.assert_called_with() - - poller = hub.poller - hub.stop() - hub.close() - poller.close.assert_called_with() - - def test_fire_timers(self): - hub = Hub() - hub.timer = Mock() - hub.timer._queue = [] - self.assertEqual(hub.fire_timers(min_delay=42.324, - max_delay=32.321), 32.321) 
- - hub.timer._queue = [1] - hub.scheduler = iter([(3.743, None)]) - self.assertEqual(hub.fire_timers(), 3.743) - - e1, e2, e3 = Mock(), Mock(), Mock() - entries = [e1, e2, e3] - - def reset(): - return [m.reset() for m in [e1, e2, e3]] - - def se(): - while 1: - while entries: - yield None, entries.pop() - yield 3.982, None - hub.scheduler = se() - - self.assertEqual(hub.fire_timers(max_timers=10), 3.982) - for E in [e3, e2, e1]: - E.assert_called_with() - reset() - - entries[:] = [Mock() for _ in range(11)] - keep = list(entries) - self.assertEqual(hub.fire_timers(max_timers=10, min_delay=1.13), 1.13) - for E in reversed(keep[1:]): - E.assert_called_with() - reset() - self.assertEqual(hub.fire_timers(max_timers=10), 3.982) - keep[0].assert_called_with() - - def test_fire_timers_raises(self): - hub = Hub() - eback = Mock() - eback.side_effect = KeyError('foo') - hub.timer = Mock() - hub.scheduler = iter([(0, eback)]) - with self.assertRaises(KeyError): - hub.fire_timers(propagate=(KeyError, )) - - eback.side_effect = ValueError('foo') - hub.scheduler = iter([(0, eback)]) - with patch('kombu.async.hub.logger') as logger: - with self.assertRaises(StopIteration): - hub.fire_timers() - self.assertTrue(logger.error.called) - - def test_add_raises_ValueError(self): - hub = Hub() - hub.poller = Mock(name='hub.poller') - hub.poller.register.side_effect = ValueError() - hub._discard = Mock(name='hub.discard') - with self.assertRaises(ValueError): - hub.add(2, Mock(), READ) - hub._discard.assert_called_with(2) - - def test_repr_active(self): - hub = Hub() - hub.readers = {1: Mock(), 2: Mock()} - hub.writers = {3: Mock(), 4: Mock()} - for value in list(hub.readers.values()) + list(hub.writers.values()): - value.__name__ = 'mock' - self.assertTrue(hub.repr_active()) - - def test_repr_events(self): - hub = Hub() - hub.readers = {6: Mock(), 7: Mock(), 8: Mock()} - hub.writers = {9: Mock()} - for value in list(hub.readers.values()) + list(hub.writers.values()): - value.__name__ = 'mock' - self.assertTrue(hub.repr_events([ - (6, READ), - (7, ERR), - (8, READ | ERR), - (9, WRITE), - (10, 13213), - ])) - - def test_callback_for(self): - hub = Hub() - reader, writer = Mock(), Mock() - hub.readers = {6: reader} - hub.writers = {7: writer} - - self.assertEqual(callback_for(hub, 6, READ), reader) - self.assertEqual(callback_for(hub, 7, WRITE), writer) - with self.assertRaises(KeyError): - callback_for(hub, 6, WRITE) - self.assertEqual(callback_for(hub, 6, WRITE, 'foo'), 'foo') - - def test_add_remove_readers(self): - hub = Hub() - P = hub.poller = Mock() - - read_A = Mock() - read_B = Mock() - hub.add_reader(10, read_A, 10) - hub.add_reader(File(11), read_B, 11) - - P.register.assert_has_calls([ - call(10, hub.READ | hub.ERR), - call(11, hub.READ | hub.ERR), - ], any_order=True) - - self.assertEqual(hub.readers[10], (read_A, (10, ))) - self.assertEqual(hub.readers[11], (read_B, (11, ))) - - hub.remove(10) - self.assertNotIn(10, hub.readers) - hub.remove(File(11)) - self.assertNotIn(11, hub.readers) - P.unregister.assert_has_calls([ - call(10), call(11), - ]) - - def test_can_remove_unknown_fds(self): - hub = Hub() - hub.poller = Mock() - hub.remove(30) - hub.remove(File(301)) - - def test_remove__unregister_raises(self): - hub = Hub() - hub.poller = Mock() - hub.poller.unregister.side_effect = OSError() - - hub.remove(313) - - def test_add_writers(self): - hub = Hub() - P = hub.poller = Mock() - - write_A = Mock() - write_B = Mock() - hub.add_writer(20, write_A) - hub.add_writer(File(21), write_B) - - 
P.register.assert_has_calls([ - call(20, hub.WRITE), - call(21, hub.WRITE), - ], any_order=True) - - self.assertEqual(hub.writers[20], (write_A, ())) - self.assertEqual(hub.writers[21], (write_B, ())) - - hub.remove(20) - self.assertNotIn(20, hub.writers) - hub.remove(File(21)) - self.assertNotIn(21, hub.writers) - P.unregister.assert_has_calls([ - call(20), call(21), - ]) - - def test_enter__exit(self): - hub = Hub() - P = hub.poller = Mock() - on_close = Mock() - hub.on_close.add(on_close) - - try: - read_A = Mock() - read_B = Mock() - hub.add_reader(10, read_A) - hub.add_reader(File(11), read_B) - write_A = Mock() - write_B = Mock() - hub.add_writer(20, write_A) - hub.add_writer(File(21), write_B) - self.assertTrue(hub.readers) - self.assertTrue(hub.writers) - finally: - assert hub.poller - hub.close() - self.assertFalse(hub.readers) - self.assertFalse(hub.writers) - - P.unregister.assert_has_calls([ - call(10), call(11), call(20), call(21), - ], any_order=True) - - on_close.assert_called_with(hub) - - def test_scheduler_property(self): - hub = Hub(timer=[1, 2, 3]) - self.assertEqual(list(hub.scheduler), [1, 2, 3]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py deleted file mode 100644 index be8d3a1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py +++ /dev/null @@ -1,425 +0,0 @@ -from __future__ import absolute_import - -import socket - -from kombu.async import Hub, READ, WRITE, ERR - -from celery.bootsteps import CLOSE, RUN -from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate -from celery.five import Empty -from celery.worker import state -from celery.worker.consumer import Consumer -from celery.worker.loops import asynloop, synloop - -from celery.tests.case import AppCase, Mock, body_from_sig - - -class X(object): - - def __init__(self, app, heartbeat=None, on_task_message=None, - transport_driver_type=None): - hub = Hub() - ( - self.obj, - self.connection, - self.consumer, - self.blueprint, - self.hub, - self.qos, - self.heartbeat, - self.clock, - ) = self.args = [Mock(name='obj'), - Mock(name='connection'), - Mock(name='consumer'), - Mock(name='blueprint'), - hub, - Mock(name='qos'), - heartbeat, - Mock(name='clock')] - self.connection.supports_heartbeats = True - self.connection.get_heartbeat_interval.side_effect = ( - lambda: self.heartbeat - ) - self.consumer.callbacks = [] - self.obj.strategies = {} - self.connection.connection_errors = (socket.error, ) - if transport_driver_type: - self.connection.transport.driver_type = transport_driver_type - self.hub.readers = {} - self.hub.writers = {} - self.hub.consolidate = set() - self.hub.timer = Mock(name='hub.timer') - self.hub.timer._queue = [Mock()] - self.hub.fire_timers = Mock(name='hub.fire_timers') - self.hub.fire_timers.return_value = 1.7 - self.hub.poller = Mock(name='hub.poller') - self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close - self.Hub = self.hub - self.blueprint.state = RUN - # need this for create_task_handler - _consumer = Consumer(Mock(), timer=Mock(), app=app) - _consumer.on_task_message = on_task_message or [] - self.obj.create_task_handler = _consumer.create_task_handler - self.on_unknown_message = self.obj.on_unknown_message = Mock( - name='on_unknown_message', - ) - _consumer.on_unknown_message = self.on_unknown_message - self.on_unknown_task = self.obj.on_unknown_task = Mock( - name='on_unknown_task', - ) - 
_consumer.on_unknown_task = self.on_unknown_task - self.on_invalid_task = self.obj.on_invalid_task = Mock( - name='on_invalid_task', - ) - _consumer.on_invalid_task = self.on_invalid_task - _consumer.strategies = self.obj.strategies - - def timeout_then_error(self, mock): - - def first(*args, **kwargs): - mock.side_effect = socket.error() - self.connection.more_to_read = False - raise socket.timeout() - mock.side_effect = first - - def close_then_error(self, mock=None, mod=0, exc=None): - mock = Mock() if mock is None else mock - - def first(*args, **kwargs): - if not mod or mock.call_count > mod: - self.close() - self.connection.more_to_read = False - raise (socket.error() if exc is None else exc) - mock.side_effect = first - return mock - - def close(self, *args, **kwargs): - self.blueprint.state = CLOSE - - def closer(self, mock=None, mod=0): - mock = Mock() if mock is None else mock - - def closing(*args, **kwargs): - if not mod or mock.call_count >= mod: - self.close() - mock.side_effect = closing - return mock - - -def get_task_callback(*args, **kwargs): - x = X(*args, **kwargs) - x.blueprint.state = CLOSE - asynloop(*x.args) - return x, x.consumer.callbacks[0] - - -class test_asynloop(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - def test_drain_after_consume(self): - x, _ = get_task_callback(self.app, transport_driver_type='amqp') - self.assertIn( - x.connection.drain_events, [p.fun for p in x.hub._ready], - ) - - def test_setup_heartbeat(self): - x = X(self.app, heartbeat=10) - x.hub.call_repeatedly = Mock(name='x.hub.call_repeatedly()') - x.blueprint.state = CLOSE - asynloop(*x.args) - x.consumer.consume.assert_called_with() - x.obj.on_ready.assert_called_with() - x.hub.call_repeatedly.assert_called_with( - 10 / 2.0, x.connection.heartbeat_check, 2.0, - ) - - def task_context(self, sig, **kwargs): - x, on_task = get_task_callback(self.app, **kwargs) - body = body_from_sig(self.app, sig) - message = Mock() - strategy = x.obj.strategies[sig.task] = Mock() - return x, on_task, body, message, strategy - - def test_on_task_received(self): - _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - on_task(body, msg) - strategy.assert_called_with( - msg, body, msg.ack_log_error, msg.reject_log_error, [], - ) - - def test_on_task_received_executes_on_task_message(self): - cbs = [Mock(), Mock(), Mock()] - _, on_task, body, msg, strategy = self.task_context( - self.add.s(2, 2), on_task_message=cbs, - ) - on_task(body, msg) - strategy.assert_called_with( - msg, body, msg.ack_log_error, msg.reject_log_error, cbs, - ) - - def test_on_task_message_missing_name(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - body.pop('task') - on_task(body, msg) - x.on_unknown_message.assert_called_with(body, msg) - - def test_on_task_not_registered(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - exc = strategy.side_effect = KeyError(self.add.name) - on_task(body, msg) - x.on_unknown_task.assert_called_with(body, msg, exc) - - def test_on_task_InvalidTaskError(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - exc = strategy.side_effect = InvalidTaskError() - on_task(body, msg) - x.on_invalid_task.assert_called_with(body, msg, exc) - - def test_should_terminate(self): - x = X(self.app) - # XXX why aren't the errors propagated?!? 
- state.should_terminate = True - try: - with self.assertRaises(WorkerTerminate): - asynloop(*x.args) - finally: - state.should_terminate = False - - def test_should_terminate_hub_close_raises(self): - x = X(self.app) - # XXX why aren't the errors propagated?!? - state.should_terminate = True - x.hub.close.side_effect = MemoryError() - try: - with self.assertRaises(WorkerTerminate): - asynloop(*x.args) - finally: - state.should_terminate = False - - def test_should_stop(self): - x = X(self.app) - state.should_stop = True - try: - with self.assertRaises(WorkerShutdown): - asynloop(*x.args) - finally: - state.should_stop = False - - def test_updates_qos(self): - x = X(self.app) - x.qos.prev = 3 - x.qos.value = 3 - x.hub.on_tick.add(x.closer(mod=2)) - x.hub.timer._queue = [1] - asynloop(*x.args) - self.assertFalse(x.qos.update.called) - - x = X(self.app) - x.qos.prev = 1 - x.qos.value = 6 - x.hub.on_tick.add(x.closer(mod=2)) - asynloop(*x.args) - x.qos.update.assert_called_with() - x.hub.fire_timers.assert_called_with(propagate=(socket.error, )) - - def test_poll_empty(self): - x = X(self.app) - x.hub.readers = {6: Mock()} - x.hub.timer._queue = [1] - x.close_then_error(x.hub.poller.poll) - x.hub.fire_timers.return_value = 33.37 - poller = x.hub.poller - poller.poll.return_value = [] - with self.assertRaises(socket.error): - asynloop(*x.args) - poller.poll.assert_called_with(33.37) - - def test_poll_readable(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) - poller = x.hub.poller - poller.poll.return_value = [(6, READ)] - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_readable_raises_Empty(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, READ)] - reader.side_effect = Empty() - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_writable(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - writer.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_writable_none_registered(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(7, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_unknown_event(self): - x = X(self.app) - writer = Mock(name='reader') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, 0)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_keep_draining_disabled(self): - x = X(self.app) - x.hub.writers = {6: Mock()} - poll = x.hub.poller.poll - - def se(*args, **kwargs): - poll.side_effect = socket.error() - poll.side_effect = se - - poller = x.hub.poller - poll.return_value = [(6, 0)] - with 
self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_err_writable(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6, 48) - x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, ERR)] - with self.assertRaises(socket.error): - asynloop(*x.args) - writer.assert_called_with(6, 48) - self.assertTrue(poller.poll.called) - - def test_poll_write_generator(self): - x = X(self.app) - x.hub.remove = Mock(name='hub.remove()') - - def Gen(): - yield 1 - yield 2 - gen = Gen() - - x.hub.add_writer(6, gen) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(gen.gi_frame.f_lasti != -1) - self.assertFalse(x.hub.remove.called) - - def test_poll_write_generator_stopped(self): - x = X(self.app) - - def Gen(): - raise StopIteration() - yield - gen = Gen() - x.hub.add_writer(6, gen) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - x.hub.remove = Mock(name='hub.remove()') - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertIsNone(gen.gi_frame) - - def test_poll_write_generator_raises(self): - x = X(self.app) - - def Gen(): - raise ValueError('foo') - yield - gen = Gen() - x.hub.add_writer(6, gen) - x.hub.remove = Mock(name='hub.remove()') - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(ValueError): - asynloop(*x.args) - self.assertIsNone(gen.gi_frame) - x.hub.remove.assert_called_with(6) - - def test_poll_err_readable(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6, 24) - x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, ERR)] - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6, 24) - self.assertTrue(poller.poll.called) - - def test_poll_raises_ValueError(self): - x = X(self.app) - x.hub.readers = {6: Mock()} - poller = x.hub.poller - x.close_then_error(poller.poll, exc=ValueError) - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - -class test_synloop(AppCase): - - def test_timeout_ignored(self): - x = X(self.app) - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - self.assertEqual(x.connection.drain_events.call_count, 2) - - def test_updates_qos_when_changed(self): - x = X(self.app) - x.qos.prev = 2 - x.qos.value = 2 - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - self.assertFalse(x.qos.update.called) - - x.qos.value = 4 - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - x.qos.update.assert_called_with() - - def test_ignores_socket_errors_when_closed(self): - x = X(self.app) - x.close_then_error(x.connection.drain_events) - self.assertIsNone(synloop(*x.args)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py deleted file mode 100644 index 16efcd7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py +++ /dev/null @@ -1,969 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, 
unicode_literals - -import anyjson -import os -import signal -import socket -import sys - -from datetime import datetime, timedelta - -from billiard.einfo import ExceptionInfo -from kombu.transport.base import Message -from kombu.utils.encoding import from_utf8, default_encode - -from celery import states -from celery.app.trace import ( - trace_task, - _trace_task_ret, - TraceInfo, - mro_lookup, - build_tracer, - setup_worker_optimizations, - reset_worker_optimizations, -) -from celery.concurrency.base import BasePool -from celery.exceptions import ( - Ignore, - InvalidTaskError, - Retry, - TaskRevokedError, - Terminated, - WorkerLostError, -) -from celery.five import keys, monotonic -from celery.signals import task_revoked -from celery.utils import uuid -from celery.worker import job as module -from celery.worker.job import Request, logger as req_logger -from celery.worker.state import revoked - -from celery.tests.case import ( - AppCase, - Case, - Mock, - SkipTest, - assert_signal_called, - body_from_sig, - patch, -) - - -class test_mro_lookup(Case): - - def test_order(self): - - class A(object): - pass - - class B(A): - pass - - class C(B): - pass - - class D(C): - - @classmethod - def mro(cls): - return () - - A.x = 10 - self.assertEqual(mro_lookup(C, 'x'), A) - self.assertIsNone(mro_lookup(C, 'x', stop=(A, ))) - B.x = 10 - self.assertEqual(mro_lookup(C, 'x'), B) - C.x = 10 - self.assertEqual(mro_lookup(C, 'x'), C) - self.assertIsNone(mro_lookup(D, 'x')) - - -def jail(app, task_id, name, args, kwargs): - request = {'id': task_id} - task = app.tasks[name] - task.__trace__ = None # rebuild - return trace_task( - task, task_id, args, kwargs, request=request, eager=False, app=app, - ) - - -class test_default_encode(AppCase): - - def setup(self): - if sys.version_info >= (3, 0): - raise SkipTest('py3k: not relevant') - - def test_jython(self): - prev, sys.platform = sys.platform, 'java 1.6.1' - try: - self.assertEqual(default_encode(bytes('foo')), 'foo') - finally: - sys.platform = prev - - def test_cpython(self): - prev, sys.platform = sys.platform, 'darwin' - gfe, sys.getfilesystemencoding = ( - sys.getfilesystemencoding, - lambda: 'utf-8', - ) - try: - self.assertEqual(default_encode(bytes('foo')), 'foo') - finally: - sys.platform = prev - sys.getfilesystemencoding = gfe - - -class test_Retry(AppCase): - - def test_retry_semipredicate(self): - try: - raise Exception('foo') - except Exception as exc: - ret = Retry('Retrying task', exc) - self.assertEqual(ret.exc, exc) - - -class test_trace_task(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def mytask(i, **kwargs): - return i ** i - self.mytask = mytask - - @self.app.task(shared=False) - def mytask_raising(i): - raise KeyError(i) - self.mytask_raising = mytask_raising - - @patch('celery.app.trace._logger') - def test_process_cleanup_fails(self, _logger): - self.mytask.backend = Mock() - self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) - tid = uuid() - ret = jail(self.app, tid, self.mytask.name, [2], {}) - self.assertEqual(ret, 4) - self.assertTrue(self.mytask.backend.store_result.called) - self.assertIn('Process cleanup failed', _logger.error.call_args[0][0]) - - def test_process_cleanup_BaseException(self): - self.mytask.backend = Mock() - self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) - with self.assertRaises(SystemExit): - jail(self.app, uuid(), self.mytask.name, [2], {}) - - def test_execute_jail_success(self): - ret = jail(self.app, uuid(), self.mytask.name, [2], {}) - 
self.assertEqual(ret, 4) - - def test_marked_as_started(self): - _started = [] - - def store_result(tid, meta, state, **kwars): - if state == states.STARTED: - _started.append(tid) - self.mytask.backend.store_result = Mock(name='store_result') - self.mytask.backend.store_result.side_effect = store_result - self.mytask.track_started = True - - tid = uuid() - jail(self.app, tid, self.mytask.name, [2], {}) - self.assertIn(tid, _started) - - self.mytask.ignore_result = True - tid = uuid() - jail(self.app, tid, self.mytask.name, [2], {}) - self.assertNotIn(tid, _started) - - def test_execute_jail_failure(self): - ret = jail( - self.app, uuid(), self.mytask_raising.name, [4], {}, - ) - self.assertIsInstance(ret, ExceptionInfo) - self.assertTupleEqual(ret.exception.args, (4, )) - - def test_execute_ignore_result(self): - - @self.app.task(shared=False, ignore_result=True) - def ignores_result(i): - return i ** i - - task_id = uuid() - ret = jail(self.app, task_id, ignores_result.name, [4], {}) - self.assertEqual(ret, 256) - self.assertFalse(self.app.AsyncResult(task_id).ready()) - - -class MockEventDispatcher(object): - - def __init__(self): - self.sent = [] - self.enabled = True - - def send(self, event, **fields): - self.sent.append(event) - - -class test_Request(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y, **kw_): - return x + y - self.add = add - - @self.app.task(shared=False) - def mytask(i, **kwargs): - return i ** i - self.mytask = mytask - - @self.app.task(shared=False) - def mytask_raising(i): - raise KeyError(i) - self.mytask_raising = mytask_raising - - def get_request(self, sig, Request=Request, **kwargs): - return Request( - body_from_sig(self.app, sig), - on_ack=Mock(), - eventer=Mock(), - app=self.app, - connection_errors=(socket.error, ), - task=sig.type, - **kwargs - ) - - def test_invalid_eta_raises_InvalidTaskError(self): - with self.assertRaises(InvalidTaskError): - self.get_request(self.add.s(2, 2).set(eta='12345')) - - def test_invalid_expires_raises_InvalidTaskError(self): - with self.assertRaises(InvalidTaskError): - self.get_request(self.add.s(2, 2).set(expires='12345')) - - def test_valid_expires_with_utc_makes_aware(self): - with patch('celery.worker.job.maybe_make_aware') as mma: - self.get_request(self.add.s(2, 2).set(expires=10)) - self.assertTrue(mma.called) - - def test_maybe_expire_when_expires_is_None(self): - req = self.get_request(self.add.s(2, 2)) - self.assertFalse(req.maybe_expire()) - - def test_on_retry_acks_if_late(self): - self.add.acks_late = True - req = self.get_request(self.add.s(2, 2)) - req.on_retry(Mock()) - req.on_ack.assert_called_with(req_logger, req.connection_errors) - - def test_on_failure_Termianted(self): - einfo = None - try: - raise Terminated('9') - except Terminated: - einfo = ExceptionInfo() - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - req.on_failure(einfo) - req.eventer.send.assert_called_with( - 'task-revoked', - uuid=req.id, terminated=True, signum='9', expired=False, - ) - - def test_log_error_propagates_MemoryError(self): - einfo = None - try: - raise MemoryError() - except MemoryError: - einfo = ExceptionInfo(internal=True) - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - with self.assertRaises(MemoryError): - req._log_error(einfo) - - def test_log_error_when_Ignore(self): - einfo = None - try: - raise Ignore() - except Ignore: - einfo = ExceptionInfo(internal=True) - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - 
req._log_error(einfo) - req.on_ack.assert_called_with(req_logger, req.connection_errors) - - def test_tzlocal_is_cached(self): - req = self.get_request(self.add.s(2, 2)) - req._tzlocal = 'foo' - self.assertEqual(req.tzlocal, 'foo') - - def test_execute_magic_kwargs(self): - task = self.add.s(2, 2) - task.freeze() - req = self.get_request(task) - self.add.accept_magic_kwargs = True - pool = Mock() - req.execute_using_pool(pool) - self.assertTrue(pool.apply_async.called) - args = pool.apply_async.call_args[1]['args'] - self.assertEqual(args[0], task.task) - self.assertEqual(args[1], task.id) - self.assertEqual(args[2], task.args) - kwargs = args[3] - self.assertEqual(kwargs.get('task_name'), task.task) - - def xRequest(self, body=None, **kwargs): - body = dict({'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}}, **body or {}) - return Request(body, app=self.app, **kwargs) - - def test_task_wrapper_repr(self): - self.assertTrue(repr(self.xRequest())) - - @patch('celery.worker.job.kwdict') - def test_kwdict(self, kwdict): - prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True - try: - self.xRequest() - self.assertTrue(kwdict.called) - finally: - module.NEEDS_KWDICT = prev - - def test_sets_store_errors(self): - self.mytask.ignore_result = True - job = self.xRequest() - self.assertFalse(job.store_errors) - - self.mytask.store_errors_even_if_ignored = True - job = self.xRequest() - self.assertTrue(job.store_errors) - - def test_send_event(self): - job = self.xRequest() - job.eventer = MockEventDispatcher() - job.send_event('task-frobulated') - self.assertIn('task-frobulated', job.eventer.sent) - - def test_send_events__disabled_at_task_level(self): - job = self.xRequest() - job.task.send_events = False - job.eventer = Mock(name='.eventer') - job.send_event('task-frobulated') - job.eventer.send.assert_not_called() - - def test_on_retry(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - job.eventer = MockEventDispatcher() - try: - raise Retry('foo', KeyError('moofoobar')) - except: - einfo = ExceptionInfo() - job.on_failure(einfo) - self.assertIn('task-retried', job.eventer.sent) - prev, module._does_info = module._does_info, False - try: - job.on_failure(einfo) - finally: - module._does_info = prev - einfo.internal = True - job.on_failure(einfo) - - def test_compat_properties(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - self.assertEqual(job.task_id, job.id) - self.assertEqual(job.task_name, job.name) - job.task_id = 'ID' - self.assertEqual(job.id, 'ID') - job.task_name = 'NAME' - self.assertEqual(job.name, 'NAME') - - def test_terminate__task_started(self): - pool = Mock() - signum = signal.SIGTERM - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwrgs': {'f': 'x'}, - }, app=self.app) - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=True, expired=False, signum=signum): - job.time_start = monotonic() - job.worker_pid = 313 - job.terminate(pool, signal='TERM') - pool.terminate_job.assert_called_with(job.worker_pid, signum) - - def test_terminate__task_reserved(self): - pool = Mock() - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - job.time_start = None - job.terminate(pool, signal='TERM') - self.assertFalse(pool.terminate_job.called) - 
self.assertTupleEqual(job._terminate_on_ack, (pool, 15)) - job.terminate(pool, signal='TERM') - - def test_revoked_expires_expired(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - 'expires': datetime.utcnow() - timedelta(days=1), - }, app=self.app) - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=False, expired=True, signum=None): - job.revoked() - self.assertIn(job.id, revoked) - self.assertEqual( - self.mytask.backend.get_status(job.id), - states.REVOKED, - ) - - def test_revoked_expires_not_expired(self): - job = self.xRequest({ - 'expires': datetime.utcnow() + timedelta(days=1), - }) - job.revoked() - self.assertNotIn(job.id, revoked) - self.assertNotEqual( - self.mytask.backend.get_status(job.id), - states.REVOKED, - ) - - def test_revoked_expires_ignore_result(self): - self.mytask.ignore_result = True - job = self.xRequest({ - 'expires': datetime.utcnow() - timedelta(days=1), - }) - job.revoked() - self.assertIn(job.id, revoked) - self.assertNotEqual( - self.mytask.backend.get_status(job.id), states.REVOKED, - ) - - def test_send_email(self): - app = self.app - mail_sent = [False] - - def mock_mail_admins(*args, **kwargs): - mail_sent[0] = True - - def get_ei(): - try: - raise KeyError('moofoobar') - except: - return ExceptionInfo() - - app.mail_admins = mock_mail_admins - self.mytask.send_error_emails = True - job = self.xRequest() - einfo = get_ei() - job.on_failure(einfo) - self.assertTrue(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - self.mytask.send_error_emails = False - job.on_failure(einfo) - self.assertFalse(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - self.mytask.send_error_emails = True - job.on_failure(einfo) - self.assertTrue(mail_sent[0]) - - def test_already_revoked(self): - job = self.xRequest() - job._already_revoked = True - self.assertTrue(job.revoked()) - - def test_revoked(self): - job = self.xRequest() - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=False, expired=False, signum=None): - revoked.add(job.id) - self.assertTrue(job.revoked()) - self.assertTrue(job._already_revoked) - self.assertTrue(job.acknowledged) - - def test_execute_does_not_execute_revoked(self): - job = self.xRequest() - revoked.add(job.id) - job.execute() - - def test_execute_acks_late(self): - self.mytask_raising.acks_late = True - job = self.xRequest({ - 'task': self.mytask_raising.name, - 'kwargs': {}, - }) - job.execute() - self.assertTrue(job.acknowledged) - job.execute() - - def test_execute_using_pool_does_not_execute_revoked(self): - job = self.xRequest() - revoked.add(job.id) - with self.assertRaises(TaskRevokedError): - job.execute_using_pool(None) - - def test_on_accepted_acks_early(self): - job = self.xRequest() - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - self.assertTrue(job.acknowledged) - prev, module._does_debug = module._does_debug, False - try: - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - finally: - module._does_debug = prev - - def test_on_accepted_acks_late(self): - job = self.xRequest() - self.mytask.acks_late = True - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - self.assertFalse(job.acknowledged) - - def test_on_accepted_terminates(self): - signum = signal.SIGTERM - pool = Mock() - job = self.xRequest() - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=True, expired=False, signum=signum): - job.terminate(pool, 
signal='TERM') - self.assertFalse(pool.terminate_job.call_count) - job.on_accepted(pid=314, time_accepted=monotonic()) - pool.terminate_job.assert_called_with(314, signum) - - def test_on_success_acks_early(self): - job = self.xRequest() - job.time_start = 1 - job.on_success(42) - prev, module._does_info = module._does_info, False - try: - job.on_success(42) - self.assertFalse(job.acknowledged) - finally: - module._does_info = prev - - def test_on_success_BaseException(self): - job = self.xRequest() - job.time_start = 1 - with self.assertRaises(SystemExit): - try: - raise SystemExit() - except SystemExit: - job.on_success(ExceptionInfo()) - else: - assert False - - def test_on_success_eventer(self): - job = self.xRequest() - job.time_start = 1 - job.eventer = Mock() - job.eventer.send = Mock() - job.on_success(42) - self.assertTrue(job.eventer.send.called) - - def test_on_success_when_failure(self): - job = self.xRequest() - job.time_start = 1 - job.on_failure = Mock() - try: - raise KeyError('foo') - except Exception: - job.on_success(ExceptionInfo()) - self.assertTrue(job.on_failure.called) - - def test_on_success_acks_late(self): - job = self.xRequest() - job.time_start = 1 - self.mytask.acks_late = True - job.on_success(42) - self.assertTrue(job.acknowledged) - - def test_on_failure_WorkerLostError(self): - - def get_ei(): - try: - raise WorkerLostError('do re mi') - except WorkerLostError: - return ExceptionInfo() - - job = self.xRequest() - exc_info = get_ei() - job.on_failure(exc_info) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.FAILURE, - ) - - self.mytask.ignore_result = True - exc_info = get_ei() - job = self.xRequest() - job.on_failure(exc_info) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.PENDING, - ) - - def test_on_failure_acks_late(self): - job = self.xRequest() - job.time_start = 1 - self.mytask.acks_late = True - try: - raise KeyError('foo') - except KeyError: - exc_info = ExceptionInfo() - job.on_failure(exc_info) - self.assertTrue(job.acknowledged) - - def test_from_message_invalid_kwargs(self): - body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo') - with self.assertRaises(InvalidTaskError): - Request(body, message=None, app=self.app) - - @patch('celery.worker.job.error') - @patch('celery.worker.job.warn') - def test_on_timeout(self, warn, error): - - job = self.xRequest() - job.on_timeout(soft=True, timeout=1337) - self.assertIn('Soft time limit', warn.call_args[0][0]) - job.on_timeout(soft=False, timeout=1337) - self.assertIn('Hard time limit', error.call_args[0][0]) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.FAILURE, - ) - - self.mytask.ignore_result = True - job = self.xRequest() - job.on_timeout(soft=True, timeout=1336) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.PENDING, - ) - - def test_fast_trace_task(self): - from celery.app import trace - setup_worker_optimizations(self.app) - self.assertIs(trace.trace_task_ret, trace._fast_trace_task) - try: - self.mytask.__trace__ = build_tracer( - self.mytask.name, self.mytask, self.app.loader, 'test', - app=self.app, - ) - res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {}) - self.assertEqual(res, 4 ** 4) - finally: - reset_worker_optimizations() - self.assertIs(trace.trace_task_ret, trace._trace_task_ret) - delattr(self.mytask, '__trace__') - res = trace.trace_task_ret( - self.mytask.name, uuid(), [4], {}, app=self.app, - ) - self.assertEqual(res, 4 ** 4) - - def test_trace_task_ret(self): - 
self.mytask.__trace__ = build_tracer( - self.mytask.name, self.mytask, self.app.loader, 'test', - app=self.app, - ) - res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) - self.assertEqual(res, 4 ** 4) - - def test_trace_task_ret__no_trace(self): - try: - delattr(self.mytask, '__trace__') - except AttributeError: - pass - res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) - self.assertEqual(res, 4 ** 4) - - def test_trace_catches_exception(self): - - def _error_exec(self, *args, **kwargs): - raise KeyError('baz') - - @self.app.task(request=None, shared=False) - def raising(): - raise KeyError('baz') - - with self.assertWarnsRegex(RuntimeWarning, - r'Exception raised outside'): - res = trace_task(raising, uuid(), [], {}, app=self.app) - self.assertIsInstance(res, ExceptionInfo) - - def test_worker_task_trace_handle_retry(self): - tid = uuid() - self.mytask.push_request(id=tid) - try: - raise ValueError('foo') - except Exception as exc: - try: - raise Retry(str(exc), exc=exc) - except Retry as exc: - w = TraceInfo(states.RETRY, exc) - w.handle_retry(self.mytask, store_errors=False) - self.assertEqual( - self.mytask.backend.get_status(tid), states.PENDING, - ) - w.handle_retry(self.mytask, store_errors=True) - self.assertEqual( - self.mytask.backend.get_status(tid), states.RETRY, - ) - finally: - self.mytask.pop_request() - - def test_worker_task_trace_handle_failure(self): - tid = uuid() - self.mytask.push_request() - try: - self.mytask.request.id = tid - try: - raise ValueError('foo') - except Exception as exc: - w = TraceInfo(states.FAILURE, exc) - w.handle_failure(self.mytask, store_errors=False) - self.assertEqual( - self.mytask.backend.get_status(tid), states.PENDING, - ) - w.handle_failure(self.mytask, store_errors=True) - self.assertEqual( - self.mytask.backend.get_status(tid), states.FAILURE, - ) - finally: - self.mytask.pop_request() - - def test_task_wrapper_mail_attrs(self): - job = self.xRequest({'args': [], 'kwargs': {}}) - x = job.success_msg % { - 'name': job.name, - 'id': job.id, - 'return_value': 10, - 'runtime': 0.3641, - } - self.assertTrue(x) - x = job.error_msg % { - 'name': job.name, - 'id': job.id, - 'exc': 'FOOBARBAZ', - 'description': 'raised unexpected', - 'traceback': 'foobarbaz', - } - self.assertTrue(x) - - def test_from_message(self): - us = 'æØåveéðƒeæ' - body = {'task': self.mytask.name, 'id': uuid(), - 'args': [2], 'kwargs': {us: 'bar'}} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - job = Request(m.decode(), message=m, app=self.app) - self.assertIsInstance(job, Request) - self.assertEqual(job.name, body['task']) - self.assertEqual(job.id, body['id']) - self.assertEqual(job.args, body['args']) - us = from_utf8(us) - if sys.version_info < (2, 6): - self.assertEqual(next(keys(job.kwargs)), us) - self.assertIsInstance(next(keys(job.kwargs)), str) - - def test_from_message_empty_args(self): - body = {'task': self.mytask.name, 'id': uuid()} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - job = Request(m.decode(), message=m, app=self.app) - self.assertIsInstance(job, Request) - self.assertEqual(job.args, []) - self.assertEqual(job.kwargs, {}) - - def test_from_message_missing_required_fields(self): - body = {} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - with self.assertRaises(KeyError): - 
Request(m.decode(), message=m, app=self.app) - - def test_from_message_nonexistant_task(self): - body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(), - 'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - with self.assertRaises(KeyError): - Request(m.decode(), message=m, app=self.app) - - def test_execute(self): - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}}) - self.assertEqual(job.execute(), 256) - meta = self.mytask.backend.get_task_meta(tid) - self.assertEqual(meta['status'], states.SUCCESS) - self.assertEqual(meta['result'], 256) - - def test_execute_success_no_kwargs(self): - - @self.app.task # traverses coverage for decorator without parens - def mytask_no_kwargs(i): - return i ** i - - tid = uuid() - job = self.xRequest({ - 'task': mytask_no_kwargs.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertEqual(job.execute(), 256) - meta = mytask_no_kwargs.backend.get_task_meta(tid) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_success_some_kwargs(self): - scratch = {'task_id': None} - - @self.app.task(shared=False, accept_magic_kwargs=True) - def mytask_some_kwargs(i, task_id): - scratch['task_id'] = task_id - return i ** i - - tid = uuid() - job = self.xRequest({ - 'task': mytask_some_kwargs.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertEqual(job.execute(), 256) - meta = mytask_some_kwargs.backend.get_task_meta(tid) - self.assertEqual(scratch.get('task_id'), tid) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_ack(self): - scratch = {'ACK': False} - - def on_ack(*args, **kwargs): - scratch['ACK'] = True - - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack) - self.assertEqual(job.execute(), 256) - meta = self.mytask.backend.get_task_meta(tid) - self.assertTrue(scratch['ACK']) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_fail(self): - tid = uuid() - job = self.xRequest({ - 'task': self.mytask_raising.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertIsInstance(job.execute(), ExceptionInfo) - meta = self.mytask_raising.backend.get_task_meta(tid) - self.assertEqual(meta['status'], states.FAILURE) - self.assertIsInstance(meta['result'], KeyError) - - def test_execute_using_pool(self): - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - - class MockPool(BasePool): - target = None - args = None - kwargs = None - - def __init__(self, *args, **kwargs): - pass - - def apply_async(self, target, args=None, kwargs=None, - *margs, **mkwargs): - self.target = target - self.args = args - self.kwargs = kwargs - - p = MockPool() - job.execute_using_pool(p) - self.assertTrue(p.target) - self.assertEqual(p.args[0], self.mytask.name) - self.assertEqual(p.args[1], tid) - self.assertEqual(p.args[2], [4]) - self.assertIn('f', p.args[3]) - self.assertIn([4], p.args) - - job.task.accept_magic_kwargs = False - job.execute_using_pool(p) - - def test_default_kwargs(self): - self.maxDiff = 3000 - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - self.assertDictEqual( - job.extend_with_default_kwargs(), { - 'f': 'x', - 'logfile': None, - 'loglevel': None, - 'task_id': job.id, - 'task_retries': 0, - 'task_is_eager': False, - 'delivery_info': { - 'exchange': None, - 
'routing_key': None, - 'priority': 0, - 'redelivered': False, - }, - 'task_name': job.name}) - - @patch('celery.worker.job.logger') - def _test_on_failure(self, exception, logger): - app = self.app - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - try: - raise exception - except Exception: - exc_info = ExceptionInfo() - app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True - job.on_failure(exc_info) - self.assertTrue(logger.log.called) - context = logger.log.call_args[0][2] - self.assertEqual(self.mytask.name, context['name']) - self.assertIn(tid, context['id']) - - def test_on_failure(self): - self._test_on_failure(Exception('Inside unit tests')) - - def test_on_failure_unicode_exception(self): - self._test_on_failure(Exception('Бобры атакуют')) - - def test_on_failure_utf8_exception(self): - self._test_on_failure(Exception( - from_utf8('Бобры атакуют'))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py deleted file mode 100644 index 4d5ad02..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import absolute_import - -from celery.worker import state -from celery.tests.case import AppCase - - -class test_revoked(AppCase): - - def test_is_working(self): - state.revoked.add('foo') - self.assertIn('foo', state.revoked) - state.revoked.pop_value('foo') - self.assertNotIn('foo', state.revoked) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py deleted file mode 100644 index ede9a00..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py +++ /dev/null @@ -1,161 +0,0 @@ -from __future__ import absolute_import - -import pickle - -from time import time - -from celery.datastructures import LimitedSet -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.worker import state - -from celery.tests.case import AppCase, Mock, patch - - -class StateResetCase(AppCase): - - def setup(self): - self.reset_state() - - def teardown(self): - self.reset_state() - - def reset_state(self): - state.active_requests.clear() - state.revoked.clear() - state.total_count.clear() - - -class MockShelve(dict): - filename = None - in_sync = False - closed = False - - def open(self, filename, **kwargs): - self.filename = filename - return self - - def sync(self): - self.in_sync = True - - def close(self): - self.closed = True - - -class MyPersistent(state.Persistent): - storage = MockShelve() - - -class test_maybe_shutdown(AppCase): - - def teardown(self): - state.should_stop = False - state.should_terminate = False - - def test_should_stop(self): - state.should_stop = True - with self.assertRaises(WorkerShutdown): - state.maybe_shutdown() - - def test_should_terminate(self): - state.should_terminate = True - with self.assertRaises(WorkerTerminate): - state.maybe_shutdown() - - -class test_Persistent(StateResetCase): - - def setup(self): - self.reset_state() - self.p = MyPersistent(state, filename='celery-state') - - def test_close_twice(self): - self.p._is_open = False - self.p.close() - - def test_constructor(self): - self.assertDictEqual(self.p.db, {}) - self.assertEqual(self.p.db.filename, self.p.filename) - - def test_save(self): - self.p.db['foo'] = 'bar' - self.p.save() - self.assertTrue(self.p.db.in_sync) - self.assertTrue(self.p.db.closed) - - def 
add_revoked(self, *ids): - for id in ids: - self.p.db.setdefault('revoked', LimitedSet()).add(id) - - def test_merge(self, data=['foo', 'bar', 'baz']): - self.add_revoked(*data) - self.p.merge() - for item in data: - self.assertIn(item, state.revoked) - - def test_merge_dict(self): - self.p.clock = Mock() - self.p.clock.adjust.return_value = 626 - d = {'revoked': {'abc': time()}, 'clock': 313} - self.p._merge_with(d) - self.p.clock.adjust.assert_called_with(313) - self.assertEqual(d['clock'], 626) - self.assertIn('abc', state.revoked) - - def test_sync_clock_and_purge(self): - passthrough = Mock() - passthrough.side_effect = lambda x: x - with patch('celery.worker.state.revoked') as revoked: - d = {'clock': 0} - self.p.clock = Mock() - self.p.clock.forward.return_value = 627 - self.p._dumps = passthrough - self.p.compress = passthrough - self.p._sync_with(d) - revoked.purge.assert_called_with() - self.assertEqual(d['clock'], 627) - self.assertNotIn('revoked', d) - self.assertIs(d['zrevoked'], revoked) - - def test_sync(self, data1=['foo', 'bar', 'baz'], - data2=['baz', 'ini', 'koz']): - self.add_revoked(*data1) - for item in data2: - state.revoked.add(item) - self.p.sync() - - self.assertTrue(self.p.db['zrevoked']) - pickled = self.p.decompress(self.p.db['zrevoked']) - self.assertTrue(pickled) - saved = pickle.loads(pickled) - for item in data2: - self.assertIn(item, saved) - - -class SimpleReq(object): - - def __init__(self, name): - self.name = name - - -class test_state(StateResetCase): - - def test_accepted(self, requests=[SimpleReq('foo'), - SimpleReq('bar'), - SimpleReq('baz'), - SimpleReq('baz')]): - for request in requests: - state.task_accepted(request) - for req in requests: - self.assertIn(req, state.active_requests) - self.assertEqual(state.total_count['foo'], 1) - self.assertEqual(state.total_count['bar'], 1) - self.assertEqual(state.total_count['baz'], 2) - - def test_ready(self, requests=[SimpleReq('foo'), - SimpleReq('bar')]): - for request in requests: - state.task_accepted(request) - self.assertEqual(len(state.active_requests), 2) - for request in requests: - state.task_ready(request) - self.assertEqual(len(state.active_requests), 0) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py deleted file mode 100644 index 7edf78b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py +++ /dev/null @@ -1,139 +0,0 @@ -from __future__ import absolute_import - -from collections import defaultdict -from contextlib import contextmanager - -from kombu.utils.limits import TokenBucket - -from celery.worker import state -from celery.utils.timeutils import rate - -from celery.tests.case import AppCase, Mock, patch, body_from_sig - - -class test_default_strategy(AppCase): - - def setup(self): - @self.app.task(shared=False) - def add(x, y): - return x + y - - self.add = add - - class Context(object): - - def __init__(self, sig, s, reserved, consumer, message, body): - self.sig = sig - self.s = s - self.reserved = reserved - self.consumer = consumer - self.message = message - self.body = body - - def __call__(self, **kwargs): - return self.s( - self.message, self.body, - self.message.ack, self.message.reject, [], **kwargs - ) - - def was_reserved(self): - return self.reserved.called - - def was_rate_limited(self): - assert not self.was_reserved() - return self.consumer._limit_task.called - - def was_scheduled(self): - assert not 
self.was_reserved() - assert not self.was_rate_limited() - return self.consumer.timer.call_at.called - - def event_sent(self): - return self.consumer.event_dispatcher.send.call_args - - def get_request(self): - if self.was_reserved(): - return self.reserved.call_args[0][0] - if self.was_rate_limited(): - return self.consumer._limit_task.call_args[0][0] - if self.was_scheduled(): - return self.consumer.timer.call_at.call_args[0][0] - raise ValueError('request not handled') - - @contextmanager - def _context(self, sig, - rate_limits=True, events=True, utc=True, limit=None): - self.assertTrue(sig.type.Strategy) - - reserved = Mock() - consumer = Mock() - consumer.task_buckets = defaultdict(lambda: None) - if limit: - bucket = TokenBucket(rate(limit), capacity=1) - consumer.task_buckets[sig.task] = bucket - consumer.disable_rate_limits = not rate_limits - consumer.event_dispatcher.enabled = events - s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved) - self.assertTrue(s) - - message = Mock() - body = body_from_sig(self.app, sig, utc=utc) - - yield self.Context(sig, s, reserved, consumer, message, body) - - def test_when_logging_disabled(self): - with patch('celery.worker.strategy.logger') as logger: - logger.isEnabledFor.return_value = False - with self._context(self.add.s(2, 2)) as C: - C() - self.assertFalse(logger.info.called) - - def test_task_strategy(self): - with self._context(self.add.s(2, 2)) as C: - C() - self.assertTrue(C.was_reserved()) - req = C.get_request() - C.consumer.on_task_request.assert_called_with(req) - self.assertTrue(C.event_sent()) - - def test_when_events_disabled(self): - with self._context(self.add.s(2, 2), events=False) as C: - C() - self.assertTrue(C.was_reserved()) - self.assertFalse(C.event_sent()) - - def test_eta_task(self): - with self._context(self.add.s(2, 2).set(countdown=10)) as C: - C() - self.assertTrue(C.was_scheduled()) - C.consumer.qos.increment_eventually.assert_called_with() - - def test_eta_task_utc_disabled(self): - with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: - C() - self.assertTrue(C.was_scheduled()) - C.consumer.qos.increment_eventually.assert_called_with() - - def test_when_rate_limited(self): - task = self.add.s(2, 2) - with self._context(task, rate_limits=True, limit='1/m') as C: - C() - self.assertTrue(C.was_rate_limited()) - - def test_when_rate_limited__limits_disabled(self): - task = self.add.s(2, 2) - with self._context(task, rate_limits=False, limit='1/m') as C: - C() - self.assertTrue(C.was_reserved()) - - def test_when_revoked(self): - task = self.add.s(2, 2) - task.freeze() - state.revoked.add(task.id) - try: - with self._context(task) as C: - C() - with self.assertRaises(ValueError): - C.get_request() - finally: - state.revoked.discard(task.id) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py deleted file mode 100644 index b700a6c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py +++ /dev/null @@ -1,1128 +0,0 @@ -from __future__ import absolute_import, print_function - -import os -import socket - -from collections import deque -from datetime import datetime, timedelta -from threading import Event - -from amqp import ChannelError -from kombu import Connection -from kombu.common import QoS, ignore_errors -from kombu.transport.base import Message - -from celery.app.defaults import DEFAULTS -from celery.bootsteps import RUN, CLOSE, 
StartStopStep -from celery.concurrency.base import BasePool -from celery.datastructures import AttributeDict -from celery.exceptions import ( - WorkerShutdown, WorkerTerminate, TaskRevokedError, -) -from celery.five import Empty, range, Queue as FastQueue -from celery.utils import uuid -from celery.worker import components -from celery.worker import consumer -from celery.worker.consumer import Consumer as __Consumer -from celery.worker.job import Request -from celery.utils import worker_direct -from celery.utils.serialization import pickle -from celery.utils.timer2 import Timer - -from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging - - -def MockStep(step=None): - step = Mock() if step is None else step - step.blueprint = Mock() - step.blueprint.name = 'MockNS' - step.name = 'MockStep(%s)' % (id(step), ) - return step - - -def mock_event_dispatcher(): - evd = Mock(name='event_dispatcher') - evd.groups = ['worker'] - evd._outbound_buffer = deque() - return evd - - -class PlaceHolder(object): - pass - - -def find_step(obj, typ): - return obj.blueprint.steps[typ.name] - - -class Consumer(__Consumer): - - def __init__(self, *args, **kwargs): - kwargs.setdefault('without_mingle', True) # disable Mingle step - kwargs.setdefault('without_gossip', True) # disable Gossip step - kwargs.setdefault('without_heartbeat', True) # disable Heart step - super(Consumer, self).__init__(*args, **kwargs) - - -class _MyKombuConsumer(Consumer): - broadcast_consumer = Mock() - task_consumer = Mock() - - def __init__(self, *args, **kwargs): - kwargs.setdefault('pool', BasePool(2)) - super(_MyKombuConsumer, self).__init__(*args, **kwargs) - - def restart_heartbeat(self): - self.heart = None - - -class MyKombuConsumer(Consumer): - - def loop(self, *args, **kwargs): - pass - - -class MockNode(object): - commands = [] - - def handle_message(self, body, message): - self.commands.append(body.pop('command', None)) - - -class MockEventDispatcher(object): - sent = [] - closed = False - flushed = False - _outbound_buffer = [] - - def send(self, event, *args, **kwargs): - self.sent.append(event) - - def close(self): - self.closed = True - - def flush(self): - self.flushed = True - - -class MockHeart(object): - closed = False - - def stop(self): - self.closed = True - - -def create_message(channel, **data): - data.setdefault('id', uuid()) - channel.no_ack_consumers = set() - m = Message(channel, body=pickle.dumps(dict(**data)), - content_type='application/x-python-serialize', - content_encoding='binary', - delivery_info={'consumer_tag': 'mock'}) - m.accept = ['application/x-python-serialize'] - return m - - -class test_Consumer(AppCase): - - def setup(self): - self.buffer = FastQueue() - self.timer = Timer() - - @self.app.task(shared=False) - def foo_task(x, y, z): - return x * y * z - self.foo_task = foo_task - - def teardown(self): - self.timer.stop() - - def test_info(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - l.connection = Mock() - l.connection.info.return_value = {'foo': 'bar'} - l.controller = l.app.WorkController() - l.controller.pool = Mock() - l.controller.pool.info.return_value = [Mock(), Mock()] - l.controller.consumer = l - info = l.controller.stats() - self.assertEqual(info['prefetch_count'], 10) - self.assertTrue(info['broker']) - - def test_start_when_closed(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = CLOSE - l.start() - - def 
test_connection(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - - l.blueprint.start(l) - self.assertIsInstance(l.connection, Connection) - - l.blueprint.state = RUN - l.event_dispatcher = None - l.blueprint.restart(l) - self.assertTrue(l.connection) - - l.blueprint.state = RUN - l.shutdown() - self.assertIsNone(l.connection) - self.assertIsNone(l.task_consumer) - - l.blueprint.start(l) - self.assertIsInstance(l.connection, Connection) - l.blueprint.restart(l) - - l.stop() - l.shutdown() - self.assertIsNone(l.connection) - self.assertIsNone(l.task_consumer) - - def test_close_connection(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - step = find_step(l, consumer.Connection) - conn = l.connection = Mock() - step.shutdown(l) - self.assertTrue(conn.close.called) - self.assertIsNone(l.connection) - - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - eventer = l.event_dispatcher = mock_event_dispatcher() - eventer.enabled = True - heart = l.heart = MockHeart() - l.blueprint.state = RUN - Events = find_step(l, consumer.Events) - Events.shutdown(l) - Heart = find_step(l, consumer.Heart) - Heart.shutdown(l) - self.assertTrue(eventer.close.call_count) - self.assertTrue(heart.closed) - - @patch('celery.worker.consumer.warn') - def test_receive_message_unknown(self, warn): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - m = create_message(backend, unknown={'baz': '!!!'}) - l.event_dispatcher = mock_event_dispatcher() - l.node = MockNode() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertTrue(warn.call_count) - - @patch('celery.worker.strategy.to_timestamp') - def test_receive_message_eta_OverflowError(self, to_timestamp): - to_timestamp.side_effect = OverflowError() - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - m = create_message(Mock(), task=self.foo_task.name, - args=('2, 2'), - kwargs={}, - eta=datetime.now().isoformat()) - l.event_dispatcher = mock_event_dispatcher() - l.node = MockNode() - l.update_strategies() - l.qos = Mock() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertTrue(m.acknowledged) - - @patch('celery.worker.consumer.error') - def test_receive_message_InvalidTaskError(self, error): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.event_dispatcher = mock_event_dispatcher() - l.steps.pop() - m = create_message(Mock(), task=self.foo_task.name, - args=(1, 2), kwargs='foobarbaz', id=1) - l.update_strategies() - l.event_dispatcher = mock_event_dispatcher() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertIn('Received invalid task message', error.call_args[0][0]) - - @patch('celery.worker.consumer.crit') - def test_on_decode_error(self, crit): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - - class MockMessage(Mock): - content_type = 'application/x-msgpack' - content_encoding = 'binary' - body = 'foobarbaz' - - message = MockMessage() - l.on_decode_error(message, KeyError('foo')) - self.assertTrue(message.ack.call_count) - self.assertIn("Can't decode message body", crit.call_args[0][0]) - - def _get_on_message(self, l): - if l.qos is None: - l.qos = Mock() - l.event_dispatcher = mock_event_dispatcher() - l.task_consumer = Mock() - l.connection = Mock() - 
l.connection.drain_events.side_effect = WorkerShutdown() - - with self.assertRaises(WorkerShutdown): - l.loop(*l.loop_args()) - self.assertTrue(l.task_consumer.register_callback.called) - return l.task_consumer.register_callback.call_args[0][0] - - def test_receieve_message(self): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.event_dispatcher = mock_event_dispatcher() - m = create_message(Mock(), task=self.foo_task.name, - args=[2, 4, 8], kwargs={}) - l.update_strategies() - callback = self._get_on_message(l) - callback(m.decode(), m) - - in_bucket = self.buffer.get_nowait() - self.assertIsInstance(in_bucket, Request) - self.assertEqual(in_bucket.name, self.foo_task.name) - self.assertEqual(in_bucket.execute(), 2 * 4 * 8) - self.assertTrue(self.timer.empty()) - - def test_start_channel_error(self): - - class MockConsumer(Consumer): - iterations = 0 - - def loop(self, *args, **kwargs): - if not self.iterations: - self.iterations = 1 - raise KeyError('foo') - raise SyntaxError('bar') - - l = MockConsumer(self.buffer.put, timer=self.timer, - send_events=False, pool=BasePool(), app=self.app) - l.channel_errors = (KeyError, ) - with self.assertRaises(KeyError): - l.start() - l.timer.stop() - - def test_start_connection_error(self): - - class MockConsumer(Consumer): - iterations = 0 - - def loop(self, *args, **kwargs): - if not self.iterations: - self.iterations = 1 - raise KeyError('foo') - raise SyntaxError('bar') - - l = MockConsumer(self.buffer.put, timer=self.timer, - send_events=False, pool=BasePool(), app=self.app) - - l.connection_errors = (KeyError, ) - self.assertRaises(SyntaxError, l.start) - l.timer.stop() - - def test_loop_ignores_socket_timeout(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - raise socket.timeout(10) - - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.connection = Connection() - l.task_consumer = Mock() - l.connection.obj = l - l.qos = QoS(l.task_consumer.qos, 10) - l.loop(*l.loop_args()) - - def test_loop_when_socket_error(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - raise socket.error('foo') - - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - c = l.connection = Connection() - l.connection.obj = l - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - with self.assertRaises(socket.error): - l.loop(*l.loop_args()) - - l.blueprint.state = CLOSE - l.connection = c - l.loop(*l.loop_args()) - - def test_loop(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.connection = Connection() - l.connection.obj = l - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - - l.loop(*l.loop_args()) - l.loop(*l.loop_args()) - self.assertTrue(l.task_consumer.consume.call_count) - l.task_consumer.qos.assert_called_with(prefetch_count=10) - self.assertEqual(l.qos.value, 10) - l.qos.decrement_eventually() - self.assertEqual(l.qos.value, 9) - l.qos.update() - self.assertEqual(l.qos.value, 9) - l.task_consumer.qos.assert_called_with(prefetch_count=9) - - def test_ignore_errors(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.connection_errors = 
(AttributeError, KeyError, ) - l.channel_errors = (SyntaxError, ) - ignore_errors(l, Mock(side_effect=AttributeError('foo'))) - ignore_errors(l, Mock(side_effect=KeyError('foo'))) - ignore_errors(l, Mock(side_effect=SyntaxError('foo'))) - with self.assertRaises(IndexError): - ignore_errors(l, Mock(side_effect=IndexError('foo'))) - - def test_apply_eta_task(self): - from celery.worker import state - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.qos = QoS(None, 10) - - task = object() - qos = l.qos.value - l.apply_eta_task(task) - self.assertIn(task, state.reserved_requests) - self.assertEqual(l.qos.value, qos - 1) - self.assertIs(self.buffer.get_nowait(), task) - - def test_receieve_message_eta_isoformat(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - m = create_message( - Mock(), task=self.foo_task.name, - eta=(datetime.now() + timedelta(days=1)).isoformat(), - args=[2, 4, 8], kwargs={}, - ) - - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 1) - current_pcount = l.qos.value - l.event_dispatcher = mock_event_dispatcher() - l.enabled = False - l.update_strategies() - callback = self._get_on_message(l) - callback(m.decode(), m) - l.timer.stop() - l.timer.join(1) - - items = [entry[2] for entry in self.timer.queue] - found = 0 - for item in items: - if item.args[0].name == self.foo_task.name: - found = True - self.assertTrue(found) - self.assertGreater(l.qos.value, current_pcount) - l.timer.stop() - - def test_pidbox_callback(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - con = find_step(l, consumer.Control).box - con.node = Mock() - con.reset = Mock() - - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - - con.node = Mock() - con.node.handle_message.side_effect = KeyError('foo') - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - - con.node = Mock() - con.node.handle_message.side_effect = ValueError('foo') - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - self.assertTrue(con.reset.called) - - def test_revoke(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - id = uuid() - t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8], - kwargs={}, id=id) - from celery.worker.state import revoked - revoked.add(id) - - callback = self._get_on_message(l) - callback(t.decode(), t) - self.assertTrue(self.buffer.empty()) - - def test_receieve_message_not_registered(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={}) - - l.event_dispatcher = mock_event_dispatcher() - callback = self._get_on_message(l) - self.assertFalse(callback(m.decode(), m)) - with self.assertRaises(Empty): - self.buffer.get_nowait() - self.assertTrue(self.timer.empty()) - - @patch('celery.worker.consumer.warn') - @patch('celery.worker.consumer.logger') - def test_receieve_message_ack_raises(self, logger, warn): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - backend = Mock() - m = create_message(backend, args=[2, 4, 8], kwargs={}) - - l.event_dispatcher = mock_event_dispatcher() - l.connection_errors = (socket.error, ) - m.reject = Mock() - m.reject.side_effect = 
socket.error('foo') - callback = self._get_on_message(l) - self.assertFalse(callback(m.decode(), m)) - self.assertTrue(warn.call_count) - with self.assertRaises(Empty): - self.buffer.get_nowait() - self.assertTrue(self.timer.empty()) - m.reject.assert_called_with(requeue=False) - self.assertTrue(logger.critical.call_count) - - def test_receive_message_eta(self): - import sys - from functools import partial - if os.environ.get('C_DEBUG_TEST'): - pp = partial(print, file=sys.__stderr__) - else: - def pp(*args, **kwargs): - pass - pp('TEST RECEIVE MESSAGE ETA') - pp('+CREATE MYKOMBUCONSUMER') - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - pp('-CREATE MYKOMBUCONSUMER') - l.steps.pop() - l.event_dispatcher = mock_event_dispatcher() - backend = Mock() - pp('+ CREATE MESSAGE') - m = create_message( - backend, task=self.foo_task.name, - args=[2, 4, 8], kwargs={}, - eta=(datetime.now() + timedelta(days=1)).isoformat(), - ) - pp('- CREATE MESSAGE') - - try: - pp('+ BLUEPRINT START 1') - l.blueprint.start(l) - pp('- BLUEPRINT START 1') - p = l.app.conf.BROKER_CONNECTION_RETRY - l.app.conf.BROKER_CONNECTION_RETRY = False - pp('+ BLUEPRINT START 2') - l.blueprint.start(l) - pp('- BLUEPRINT START 2') - l.app.conf.BROKER_CONNECTION_RETRY = p - pp('+ BLUEPRINT RESTART') - l.blueprint.restart(l) - pp('- BLUEPRINT RESTART') - l.event_dispatcher = mock_event_dispatcher() - pp('+ GET ON MESSAGE') - callback = self._get_on_message(l) - pp('- GET ON MESSAGE') - pp('+ CALLBACK') - callback(m.decode(), m) - pp('- CALLBACK') - finally: - pp('+ STOP TIMER') - l.timer.stop() - pp('- STOP TIMER') - try: - pp('+ JOIN TIMER') - l.timer.join() - pp('- JOIN TIMER') - except RuntimeError: - pass - - in_hold = l.timer.queue[0] - self.assertEqual(len(in_hold), 3) - eta, priority, entry = in_hold - task = entry.args[0] - self.assertIsInstance(task, Request) - self.assertEqual(task.name, self.foo_task.name) - self.assertEqual(task.execute(), 2 * 4 * 8) - with self.assertRaises(Empty): - self.buffer.get_nowait() - - def test_reset_pidbox_node(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - con = find_step(l, consumer.Control).box - con.node = Mock() - chan = con.node.channel = Mock() - l.connection = Mock() - chan.close.side_effect = socket.error('foo') - l.connection_errors = (socket.error, ) - con.reset() - chan.close.assert_called_with() - - def test_reset_pidbox_node_green(self): - from celery.worker.pidbox import gPidbox - pool = Mock() - pool.is_green = True - l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, - app=self.app) - con = find_step(l, consumer.Control) - self.assertIsInstance(con.box, gPidbox) - con.start(l) - l.pool.spawn_n.assert_called_with( - con.box.loop, l, - ) - - def test__green_pidbox_node(self): - pool = Mock() - pool.is_green = True - l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, - app=self.app) - l.node = Mock() - controller = find_step(l, consumer.Control) - - class BConsumer(Mock): - - def __enter__(self): - self.consume() - return self - - def __exit__(self, *exc_info): - self.cancel() - - controller.box.node.listen = BConsumer() - connections = [] - - class Connection(object): - calls = 0 - - def __init__(self, obj): - connections.append(self) - self.obj = obj - self.default_channel = self.channel() - self.closed = False - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - - def channel(self): - return Mock() - - def as_uri(self): - return 'dummy://' - - def 
drain_events(self, **kwargs): - if not self.calls: - self.calls += 1 - raise socket.timeout() - self.obj.connection = None - controller.box._node_shutdown.set() - - def close(self): - self.closed = True - - l.connection = Mock() - l.connect = lambda: Connection(obj=l) - controller = find_step(l, consumer.Control) - controller.box.loop(l) - - self.assertTrue(controller.box.node.listen.called) - self.assertTrue(controller.box.consumer) - controller.box.consumer.consume.assert_called_with() - - self.assertIsNone(l.connection) - self.assertTrue(connections[0].closed) - - @patch('kombu.connection.Connection._establish_connection') - @patch('kombu.utils.sleep') - def test_connect_errback(self, sleep, connect): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - from kombu.transport.memory import Transport - Transport.connection_errors = (ChannelError, ) - - def effect(): - if connect.call_count > 1: - return - raise ChannelError('error') - connect.side_effect = effect - l.connect() - connect.assert_called_with() - - def test_stop_pidbox_node(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - cont = find_step(l, consumer.Control) - cont._node_stopped = Event() - cont._node_shutdown = Event() - cont._node_stopped.set() - cont.stop(l) - - def test_start__loop(self): - - class _QoS(object): - prev = 3 - value = 4 - - def update(self): - self.prev = self.value - - class _Consumer(MyKombuConsumer): - iterations = 0 - - def reset_connection(self): - if self.iterations >= 1: - raise KeyError('foo') - - init_callback = Mock() - l = _Consumer(self.buffer.put, timer=self.timer, - init_callback=init_callback, app=self.app) - l.task_consumer = Mock() - l.broadcast_consumer = Mock() - l.qos = _QoS() - l.connection = Connection() - l.iterations = 0 - - def raises_KeyError(*args, **kwargs): - l.iterations += 1 - if l.qos.prev != l.qos.value: - l.qos.update() - if l.iterations >= 2: - raise KeyError('foo') - - l.loop = raises_KeyError - with self.assertRaises(KeyError): - l.start() - self.assertEqual(l.iterations, 2) - self.assertEqual(l.qos.prev, l.qos.value) - - init_callback.reset_mock() - l = _Consumer(self.buffer.put, timer=self.timer, app=self.app, - send_events=False, init_callback=init_callback) - l.qos = _QoS() - l.task_consumer = Mock() - l.broadcast_consumer = Mock() - l.connection = Connection() - l.loop = Mock(side_effect=socket.error('foo')) - with self.assertRaises(socket.error): - l.start() - self.assertTrue(l.loop.call_count) - - def test_reset_connection_with_no_node(self): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.steps.pop() - self.assertEqual(None, l.pool) - l.blueprint.start(l) - - -class test_WorkController(AppCase): - - def setup(self): - self.worker = self.create_worker() - from celery import worker - self._logger = worker.logger - self._comp_logger = components.logger - self.logger = worker.logger = Mock() - self.comp_logger = components.logger = Mock() - - @self.app.task(shared=False) - def foo_task(x, y, z): - return x * y * z - self.foo_task = foo_task - - def teardown(self): - from celery import worker - worker.logger = self._logger - components.logger = self._comp_logger - - def create_worker(self, **kw): - worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) - worker.blueprint.shutdown_complete.set() - return worker - - def test_on_consumer_ready(self): - self.worker.on_consumer_ready(Mock()) - - def test_setup_queues_worker_direct(self): - self.app.conf.CELERY_WORKER_DIRECT = True - 
self.app.amqp.__dict__['queues'] = Mock() - self.worker.setup_queues({}) - self.app.amqp.queues.select_add.assert_called_with( - worker_direct(self.worker.hostname), - ) - - def test_send_worker_shutdown(self): - with patch('celery.signals.worker_shutdown') as ws: - self.worker._send_worker_shutdown() - ws.send.assert_called_with(sender=self.worker) - - def test_process_shutdown_on_worker_shutdown(self): - raise SkipTest('unstable test') - from celery.concurrency.prefork import process_destructor - from celery.concurrency.asynpool import Worker - with patch('celery.signals.worker_process_shutdown') as ws: - Worker._make_shortcuts = Mock() - with patch('os._exit') as _exit: - worker = Worker(None, None, on_exit=process_destructor) - worker._do_exit(22, 3.1415926) - ws.send.assert_called_with( - sender=None, pid=22, exitcode=3.1415926, - ) - _exit.assert_called_with(3.1415926) - - def test_process_task_revoked_release_semaphore(self): - self.worker._quick_release = Mock() - req = Mock() - req.execute_using_pool.side_effect = TaskRevokedError - self.worker._process_task(req) - self.worker._quick_release.assert_called_with() - - delattr(self.worker, '_quick_release') - self.worker._process_task(req) - - def test_shutdown_no_blueprint(self): - self.worker.blueprint = None - self.worker._shutdown() - - @patch('celery.platforms.create_pidlock') - def test_use_pidfile(self, create_pidlock): - create_pidlock.return_value = Mock() - worker = self.create_worker(pidfile='pidfilelockfilepid') - worker.steps = [] - worker.start() - self.assertTrue(create_pidlock.called) - worker.stop() - self.assertTrue(worker.pidlock.release.called) - - @patch('celery.platforms.signals') - @patch('celery.platforms.set_mp_process_title') - def test_process_initializer(self, set_mp_process_title, _signals): - with restore_logging(): - from celery import signals - from celery._state import _tls - from celery.concurrency.prefork import ( - process_initializer, WORKER_SIGRESET, WORKER_SIGIGNORE, - ) - - def on_worker_process_init(**kwargs): - on_worker_process_init.called = True - on_worker_process_init.called = False - signals.worker_process_init.connect(on_worker_process_init) - - def Loader(*args, **kwargs): - loader = Mock(*args, **kwargs) - loader.conf = {} - loader.override_backends = {} - return loader - - with self.Celery(loader=Loader) as app: - app.conf = AttributeDict(DEFAULTS) - process_initializer(app, 'awesome.worker.com') - _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) - _signals.reset.assert_any_call(*WORKER_SIGRESET) - self.assertTrue(app.loader.init_worker.call_count) - self.assertTrue(on_worker_process_init.called) - self.assertIs(_tls.current_app, app) - set_mp_process_title.assert_called_with( - 'celeryd', hostname='awesome.worker.com', - ) - - with patch('celery.app.trace.setup_worker_optimizations') as S: - os.environ['FORKED_BY_MULTIPROCESSING'] = "1" - try: - process_initializer(app, 'luke.worker.com') - S.assert_called_with(app) - finally: - os.environ.pop('FORKED_BY_MULTIPROCESSING', None) - - def test_attrs(self): - worker = self.worker - self.assertIsNotNone(worker.timer) - self.assertIsInstance(worker.timer, Timer) - self.assertIsNotNone(worker.pool) - self.assertIsNotNone(worker.consumer) - self.assertTrue(worker.steps) - - def test_with_embedded_beat(self): - worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) - self.assertTrue(worker.beat) - self.assertIn(worker.beat, [w.obj for w in worker.steps]) - - def test_with_autoscaler(self): - worker = self.create_worker( - 
autoscale=[10, 3], send_events=False, - timer_cls='celery.utils.timer2.Timer', - ) - self.assertTrue(worker.autoscaler) - - def test_dont_stop_or_terminate(self): - worker = self.app.WorkController(concurrency=1, loglevel=0) - worker.stop() - self.assertNotEqual(worker.blueprint.state, CLOSE) - worker.terminate() - self.assertNotEqual(worker.blueprint.state, CLOSE) - - sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False - try: - worker.blueprint.state = RUN - worker.stop(in_sighandler=True) - self.assertNotEqual(worker.blueprint.state, CLOSE) - worker.terminate(in_sighandler=True) - self.assertNotEqual(worker.blueprint.state, CLOSE) - finally: - worker.pool.signal_safe = sigsafe - - def test_on_timer_error(self): - worker = self.app.WorkController(concurrency=1, loglevel=0) - - try: - raise KeyError('foo') - except KeyError as exc: - components.Timer(worker).on_timer_error(exc) - msg, args = self.comp_logger.error.call_args[0] - self.assertIn('KeyError', msg % args) - - def test_on_timer_tick(self): - worker = self.app.WorkController(concurrency=1, loglevel=10) - - components.Timer(worker).on_timer_tick(30.0) - xargs = self.comp_logger.debug.call_args[0] - fmt, arg = xargs[0], xargs[1] - self.assertEqual(30.0, arg) - self.assertIn('Next eta %s secs', fmt) - - def test_process_task(self): - worker = self.worker - worker.pool = Mock() - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker._process_task(task) - self.assertEqual(worker.pool.apply_async.call_count, 1) - worker.pool.stop() - - def test_process_task_raise_base(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C') - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker.steps = [] - worker.blueprint.state = RUN - with self.assertRaises(KeyboardInterrupt): - worker._process_task(task) - - def test_process_task_raise_WorkerTerminate(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = WorkerTerminate() - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker.steps = [] - worker.blueprint.state = RUN - with self.assertRaises(SystemExit): - worker._process_task(task) - - def test_process_task_raise_regular(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = KeyError('some exception') - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker._process_task(task) - worker.pool.stop() - - def test_start_catches_base_exceptions(self): - worker1 = self.create_worker() - worker1.blueprint.state = RUN - stc = MockStep() - stc.start.side_effect = WorkerTerminate() - worker1.steps = [stc] - worker1.start() - stc.start.assert_called_with(worker1) - self.assertTrue(stc.terminate.call_count) - - worker2 = self.create_worker() - worker2.blueprint.state = RUN - sec = MockStep() - sec.start.side_effect = WorkerShutdown() - sec.terminate = None - worker2.steps = [sec] - worker2.start() - self.assertTrue(sec.stop.call_count) - - def test_state_db(self): - from celery.worker import state - Persistent = state.Persistent - - state.Persistent = Mock() - try: - 
worker = self.create_worker(state_db='statefilename') - self.assertTrue(worker._persistence) - finally: - state.Persistent = Persistent - - def test_process_task_sem(self): - worker = self.worker - worker._quick_acquire = Mock() - - req = Mock() - worker._process_task_sem(req) - worker._quick_acquire.assert_called_with(worker._process_task, req) - - def test_signal_consumer_close(self): - worker = self.worker - worker.consumer = Mock() - - worker.signal_consumer_close() - worker.consumer.close.assert_called_with() - - worker.consumer.close.side_effect = AttributeError() - worker.signal_consumer_close() - - def test_start__stop(self): - worker = self.worker - worker.blueprint.shutdown_complete.set() - worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)] - worker.blueprint.state = RUN - worker.blueprint.started = 4 - for w in worker.steps: - w.start = Mock() - w.close = Mock() - w.stop = Mock() - - worker.start() - for w in worker.steps: - self.assertTrue(w.start.call_count) - worker.consumer = Mock() - worker.stop() - for stopstep in worker.steps: - self.assertTrue(stopstep.close.call_count) - self.assertTrue(stopstep.stop.call_count) - - # Doesn't close pool if no pool. - worker.start() - worker.pool = None - worker.stop() - - # test that stop of None is not attempted - worker.steps[-1] = None - worker.start() - worker.stop() - - def test_step_raises(self): - worker = self.worker - step = Mock() - worker.steps = [step] - step.start.side_effect = TypeError() - worker.stop = Mock() - worker.start() - worker.stop.assert_called_with() - - def test_state(self): - self.assertTrue(self.worker.state) - - def test_start__terminate(self): - worker = self.worker - worker.blueprint.shutdown_complete.set() - worker.blueprint.started = 5 - worker.blueprint.state = RUN - worker.steps = [MockStep() for _ in range(5)] - worker.start() - for w in worker.steps[:3]: - self.assertTrue(w.start.call_count) - self.assertTrue(worker.blueprint.started, len(worker.steps)) - self.assertEqual(worker.blueprint.state, RUN) - worker.terminate() - for step in worker.steps: - self.assertTrue(step.terminate.call_count) - - def test_Queues_pool_no_sem(self): - w = Mock() - w.pool_cls.uses_semaphore = False - components.Queues(w).create(w) - self.assertIs(w.process_task, w._process_task) - - def test_Hub_crate(self): - w = Mock() - x = components.Hub(w) - x.create(w) - self.assertTrue(w.timer.max_interval) - - def test_Pool_crate_threaded(self): - w = Mock() - w._conninfo.connection_errors = w._conninfo.channel_errors = () - w.pool_cls = Mock() - w.use_eventloop = False - pool = components.Pool(w) - pool.create(w) - - def test_Pool_create(self): - from kombu.async.semaphore import LaxBoundedSemaphore - w = Mock() - w._conninfo.connection_errors = w._conninfo.channel_errors = () - w.hub = Mock() - - PoolImp = Mock() - poolimp = PoolImp.return_value = Mock() - poolimp._pool = [Mock(), Mock()] - poolimp._cache = {} - poolimp._fileno_to_inq = {} - poolimp._fileno_to_outq = {} - - from celery.concurrency.prefork import TaskPool as _TaskPool - - class MockTaskPool(_TaskPool): - Pool = PoolImp - - @property - def timers(self): - return {Mock(): 30} - - w.pool_cls = MockTaskPool - w.use_eventloop = True - w.consumer.restart_count = -1 - pool = components.Pool(w) - pool.create(w) - pool.register_with_event_loop(w, w.hub) - self.assertIsInstance(w.semaphore, LaxBoundedSemaphore) - P = w.pool - P.start() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py 
b/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py deleted file mode 100644 index 20e11f0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py +++ /dev/null @@ -1,407 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils - ~~~~~~~~~~~~ - - Utility functions. - -""" -from __future__ import absolute_import, print_function - -import numbers -import os -import re -import socket -import sys -import traceback -import warnings -import datetime - -from collections import Callable -from functools import partial, wraps -from inspect import getargspec -from pprint import pprint - -from kombu.entity import Exchange, Queue - -from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning -from celery.five import WhateverIO, items, reraise, string_t - -__all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge', - 'is_iterable', 'isatty', 'cry', 'maybe_reraise', 'strtobool', - 'jsonify', 'gen_task_name', 'nodename', 'nodesplit', - 'cached_property'] - -PY3 = sys.version_info[0] == 3 - - -PENDING_DEPRECATION_FMT = """ - {description} is scheduled for deprecation in \ - version {deprecation} and removal in version v{removal}. \ - {alternative} -""" - -DEPRECATION_FMT = """ - {description} is deprecated and scheduled for removal in - version {removal}. {alternative} -""" - -UNKNOWN_SIMPLE_FORMAT_KEY = """ -Unknown format %{0} in string {1!r}. -Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'), -or did you escape and the value was expanded twice? (%%N -> %N -> %hostname)? -""".strip() - -#: Billiard sets this when execv is enabled. -#: We use it to find out the name of the original ``__main__`` -#: module, so that we can properly rewrite the name of the -#: task to be that of ``App.main``. -MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None - -#: Exchange for worker direct queues. -WORKER_DIRECT_EXCHANGE = Exchange('C.dq') - -#: Format for worker direct queue names. -WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq' - -#: Separator for worker node name and hostname. -NODENAME_SEP = '@' - -NODENAME_DEFAULT = 'celery' -RE_FORMAT = re.compile(r'%(\w)') - - -def worker_direct(hostname): - """Return :class:`kombu.Queue` that is a direct route to - a worker by hostname. - - :param hostname: The fully qualified node name of a worker - (e.g. ``w1@example.com``). If passed a - :class:`kombu.Queue` instance it will simply return - that instead. - """ - if isinstance(hostname, Queue): - return hostname - return Queue(WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), - WORKER_DIRECT_EXCHANGE, - hostname, auto_delete=True) - - -def warn_deprecated(description=None, deprecation=None, - removal=None, alternative=None, stacklevel=2): - ctx = {'description': description, - 'deprecation': deprecation, 'removal': removal, - 'alternative': alternative} - if deprecation is not None: - w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) - else: - w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) - warnings.warn(w, stacklevel=stacklevel) - - -def deprecated(deprecation=None, removal=None, - alternative=None, description=None): - """Decorator for deprecated functions. - - A deprecation warning will be emitted when the function is called. - - :keyword deprecation: Version that marks first deprecation, if this - argument is not set a ``PendingDeprecationWarning`` will be emitted - instead. - :keyword removal: Future version when this feature will be removed. 
- :keyword alternative: Instructions for an alternative solution (if any). - :keyword description: Description of what is being deprecated. - - """ - def _inner(fun): - - @wraps(fun) - def __inner(*args, **kwargs): - from .imports import qualname - warn_deprecated(description=description or qualname(fun), - deprecation=deprecation, - removal=removal, - alternative=alternative, - stacklevel=3) - return fun(*args, **kwargs) - return __inner - return _inner - - -def deprecated_property(deprecation=None, removal=None, - alternative=None, description=None): - def _inner(fun): - return _deprecated_property( - fun, deprecation=deprecation, removal=removal, - alternative=alternative, description=description or fun.__name__) - return _inner - - -class _deprecated_property(object): - - def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo): - self.__get = fget - self.__set = fset - self.__del = fdel - self.__name__, self.__module__, self.__doc__ = ( - fget.__name__, fget.__module__, fget.__doc__, - ) - self.depreinfo = depreinfo - self.depreinfo.setdefault('stacklevel', 3) - - def __get__(self, obj, type=None): - if obj is None: - return self - warn_deprecated(**self.depreinfo) - return self.__get(obj) - - def __set__(self, obj, value): - if obj is None: - return self - if self.__set is None: - raise AttributeError('cannot set attribute') - warn_deprecated(**self.depreinfo) - self.__set(obj, value) - - def __delete__(self, obj): - if obj is None: - return self - if self.__del is None: - raise AttributeError('cannot delete attribute') - warn_deprecated(**self.depreinfo) - self.__del(obj) - - def setter(self, fset): - return self.__class__(self.__get, fset, self.__del, **self.depreinfo) - - def deleter(self, fdel): - return self.__class__(self.__get, self.__set, fdel, **self.depreinfo) - - -def lpmerge(L, R): - """In place left precedent dictionary merge. 
- - Keeps values from `L`, if the value in `R` is :const:`None`.""" - set = L.__setitem__ - [set(k, v) for k, v in items(R) if v is not None] - return L - - -def is_iterable(obj): - try: - iter(obj) - except TypeError: - return False - return True - - -def fun_takes_kwargs(fun, kwlist=[]): - # deprecated - S = getattr(fun, 'argspec', getargspec(fun)) - if S.keywords is not None: - return kwlist - return [kw for kw in kwlist if kw in S.args] - - -def isatty(fh): - try: - return fh.isatty() - except AttributeError: - pass - - -def cry(out=None, sepchr='=', seplen=49): # pragma: no cover - """Return stacktrace of all active threads, - taken from https://gist.github.com/737056.""" - import threading - - out = WhateverIO() if out is None else out - P = partial(print, file=out) - - # get a map of threads by their ID so we can print their names - # during the traceback dump - tmap = dict((t.ident, t) for t in threading.enumerate()) - - sep = sepchr * seplen - for tid, frame in items(sys._current_frames()): - thread = tmap.get(tid) - if not thread: - # skip old junk (left-overs from a fork) - continue - P('{0.name}'.format(thread)) - P(sep) - traceback.print_stack(frame, file=out) - P(sep) - P('LOCAL VARIABLES') - P(sep) - pprint(frame.f_locals, stream=out) - P('\n') - return out.getvalue() - - -def maybe_reraise(): - """Re-raise if an exception is currently being handled, or return - otherwise.""" - exc_info = sys.exc_info() - try: - if exc_info[2]: - reraise(exc_info[0], exc_info[1], exc_info[2]) - finally: - # see http://docs.python.org/library/sys.html#sys.exc_info - del(exc_info) - - -def strtobool(term, table={'false': False, 'no': False, '0': False, - 'true': True, 'yes': True, '1': True, - 'on': True, 'off': False}): - """Convert common terms for true/false to bool - (true/false/yes/no/on/off/1/0).""" - if isinstance(term, string_t): - try: - return table[term.lower()] - except KeyError: - raise TypeError('Cannot coerce {0!r} to type bool'.format(term)) - return term - - -def jsonify(obj, - builtin_types=(numbers.Real, string_t), key=None, - keyfilter=None, - unknown_type_filter=None): - """Transforms object making it suitable for json serialization""" - from kombu.abstract import Object as KombuDictType - _jsonify = partial(jsonify, builtin_types=builtin_types, key=key, - keyfilter=keyfilter, - unknown_type_filter=unknown_type_filter) - - if isinstance(obj, KombuDictType): - obj = obj.as_dict(recurse=True) - - if obj is None or isinstance(obj, builtin_types): - return obj - elif isinstance(obj, (tuple, list)): - return [_jsonify(v) for v in obj] - elif isinstance(obj, dict): - return dict((k, _jsonify(v, key=k)) - for k, v in items(obj) - if (keyfilter(k) if keyfilter else 1)) - elif isinstance(obj, datetime.datetime): - # See "Date Time String Format" in the ECMA-262 specification. 
- r = obj.isoformat() - if obj.microsecond: - r = r[:23] + r[26:] - if r.endswith('+00:00'): - r = r[:-6] + 'Z' - return r - elif isinstance(obj, datetime.date): - return obj.isoformat() - elif isinstance(obj, datetime.time): - r = obj.isoformat() - if obj.microsecond: - r = r[:12] - return r - elif isinstance(obj, datetime.timedelta): - return str(obj) - else: - if unknown_type_filter is None: - raise ValueError( - 'Unsupported type: {0!r} {1!r} (parent: {2})'.format( - type(obj), obj, key)) - return unknown_type_filter(obj) - - -def gen_task_name(app, name, module_name): - """Generate task name from name/module pair.""" - try: - module = sys.modules[module_name] - except KeyError: - # Fix for manage.py shell_plus (Issue #366) - module = None - - if module is not None: - module_name = module.__name__ - # - If the task module is used as the __main__ script - # - we need to rewrite the module part of the task name - # - to match App.main. - if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE: - # - see comment about :envvar:`MP_MAIN_FILE` above. - module_name = '__main__' - if module_name == '__main__' and app.main: - return '.'.join([app.main, name]) - return '.'.join(p for p in (module_name, name) if p) - - -def nodename(name, hostname): - """Create node name from name/hostname pair.""" - return NODENAME_SEP.join((name, hostname)) - - -def anon_nodename(hostname=None, prefix='gen'): - return nodename(''.join([prefix, str(os.getpid())]), - hostname or socket.gethostname()) - - -def nodesplit(nodename): - """Split node name into tuple of name/hostname.""" - parts = nodename.split(NODENAME_SEP, 1) - if len(parts) == 1: - return None, parts[0] - return parts - - -def default_nodename(hostname): - name, host = nodesplit(hostname or '') - return nodename(name or NODENAME_DEFAULT, host or socket.gethostname()) - - -def node_format(s, nodename, **extra): - name, host = nodesplit(nodename) - return host_format( - s, host, n=name or NODENAME_DEFAULT, **extra) - - -def _fmt_process_index(prefix='', default='0'): - from .log import current_process_index - index = current_process_index() - return '{0}{1}'.format(prefix, index) if index else default -_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') - - -def host_format(s, host=None, **extra): - host = host or socket.gethostname() - name, _, domain = host.partition('.') - keys = dict({ - 'h': host, 'n': name, 'd': domain, - 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, - }, **extra) - return simple_format(s, keys) - - -def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'): - if s: - keys.setdefault('%', '%') - - def resolve(match): - key = match.expand(expand) - try: - resolver = keys[key] - except KeyError: - raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s)) - if isinstance(resolver, Callable): - return resolver() - return resolver - - return pattern.sub(resolve, s) - return s - - -# ------------------------------------------------------------------------ # -# > XXX Compat -from .log import LOG_LEVELS # noqa -from .imports import ( # noqa - qualname as get_full_cls_name, symbol_by_name as get_cls_by_name, - instantiate, import_from_cwd -) -from .functional import chunks, noop # noqa -from kombu.utils import cached_property, kwdict, uuid # noqa -gen_unique_id = uuid diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py b/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py deleted file mode 100644 index 6f62964..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py +++ /dev/null @@ -1 +0,0 @@ -from celery.five import * # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py b/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py deleted file mode 100644 index 09c6ec8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.debug - ~~~~~~~~~~~~~~~~~~ - - Utilities for debugging memory usage. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import os - -from contextlib import contextmanager -from functools import partial - -from celery.five import range -from celery.platforms import signals - -try: - from psutil import Process -except ImportError: - Process = None # noqa - -__all__ = [ - 'blockdetection', 'sample_mem', 'memdump', 'sample', - 'humanbytes', 'mem_rss', 'ps', -] - -UNITS = ( - (2 ** 40.0, 'TB'), - (2 ** 30.0, 'GB'), - (2 ** 20.0, 'MB'), - (2 ** 10.0, 'kB'), - (0.0, '{0!d}b'), -) - -_process = None -_mem_sample = [] - - -def _on_blocking(signum, frame): - import inspect - raise RuntimeError( - 'Blocking detection timed-out at: {0}'.format( - inspect.getframeinfo(frame) - ) - ) - - -@contextmanager -def blockdetection(timeout): - """A timeout context using ``SIGALRM`` that can be used to detect blocking - functions.""" - if not timeout: - yield - else: - old_handler = signals['ALRM'] - old_handler = None if old_handler == _on_blocking else old_handler - - signals['ALRM'] = _on_blocking - - try: - yield signals.arm_alarm(timeout) - finally: - if old_handler: - signals['ALRM'] = old_handler - signals.reset_alarm() - - -def sample_mem(): - """Sample RSS memory usage. - - Statistics can then be output by calling :func:`memdump`. - - """ - current_rss = mem_rss() - _mem_sample.append(current_rss) - return current_rss - - -def _memdump(samples=10): - S = _mem_sample - prev = list(S) if len(S) <= samples else sample(S, samples) - _mem_sample[:] = [] - import gc - gc.collect() - after_collect = mem_rss() - return prev, after_collect - - -def memdump(samples=10, file=None): - """Dump memory statistics. - - Will print a sample of all RSS memory samples added by - calling :func:`sample_mem`, and in addition print - used RSS memory after :func:`gc.collect`. - - """ - say = partial(print, file=file) - if ps() is None: - say('- rss: (psutil not installed).') - return - prev, after_collect = _memdump(samples) - if prev: - say('- rss (sample):') - for mem in prev: - say('- > {0},'.format(mem)) - say('- rss (end): {0}.'.format(after_collect)) - - -def sample(x, n, k=0): - """Given a list `x` a sample of length ``n`` of that list is returned. - - E.g. if `n` is 10, and `x` has 100 items, a list of every 10th - item is returned. - - ``k`` can be used as offset. - - """ - j = len(x) // n - for _ in range(n): - try: - yield x[k] - except IndexError: - break - k += j - - -def hfloat(f, p=5): - """Convert float to value suitable for humans. - - :keyword p: Float precision. - - """ - i = int(f) - return i if i == f else '{0:.{p}}'.format(f, p=p) - - -def humanbytes(s): - """Convert bytes to human-readable form (e.g. 
kB, MB).""" - return next( - '{0}{1}'.format(hfloat(s / div if div else s), unit) - for div, unit in UNITS if s >= div - ) - - -def mem_rss(): - """Return RSS memory usage as a humanized string.""" - p = ps() - if p is not None: - return humanbytes(_process_memory_info(p).rss) - - -def ps(): - """Return the global :class:`psutil.Process` instance, - or :const:`None` if :mod:`psutil` is not installed.""" - global _process - if _process is None and Process is not None: - _process = Process(os.getpid()) - return _process - - -def _process_memory_info(process): - try: - return process.memory_info() - except AttributeError: - return process.get_memory_info() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py deleted file mode 100644 index b6e8d0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -from .signal import Signal - -__all__ = ['Signal'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py deleted file mode 100644 index cd818bb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py +++ /dev/null @@ -1,286 +0,0 @@ -# -*- coding: utf-8 -*- -""" -"Safe weakrefs", originally from pyDispatcher. - -Provides a way to safely weakref any function, including bound methods (which -aren't handled by the core weakref module). -""" -from __future__ import absolute_import - -import sys -import traceback -import weakref - -__all__ = ['safe_ref'] - -PY3 = sys.version_info[0] == 3 - - -def safe_ref(target, on_delete=None): # pragma: no cover - """Return a *safe* weak reference to a callable target - - :param target: the object to be weakly referenced, if it's a - bound method reference, will create a :class:`BoundMethodWeakref`, - otherwise creates a simple :class:`weakref.ref`. - - :keyword on_delete: if provided, will have a hard reference stored - to the callable to be called after the safe reference - goes out of scope with the reference object, (either a - :class:`weakref.ref` or a :class:`BoundMethodWeakref`) as argument. - """ - if getattr(target, '__self__', None) is not None: - # Turn a bound method into a BoundMethodWeakref instance. - # Keep track of these instances for lookup by disconnect(). - assert hasattr(target, '__func__'), \ - """safe_ref target {0!r} has __self__, but no __func__: \ - don't know how to create reference""".format(target) - return get_bound_method_weakref(target=target, - on_delete=on_delete) - if callable(on_delete): - return weakref.ref(target, on_delete) - else: - return weakref.ref(target) - - -class BoundMethodWeakref(object): # pragma: no cover - """'Safe' and reusable weak references to instance methods. - - BoundMethodWeakref objects provide a mechanism for - referencing a bound method without requiring that the - method object itself (which is normally a transient - object) is kept alive. Instead, the BoundMethodWeakref - object keeps weak references to both the object and the - function which together define the instance method. - - .. attribute:: key - - the identity key for the reference, calculated - by the class's :meth:`calculate_key` method applied to the - target instance method - - .. 
attribute:: deletion_methods - - sequence of callable objects taking - single argument, a reference to this object which - will be called when *either* the target object or - target function is garbage collected (i.e. when - this object becomes invalid). These are specified - as the on_delete parameters of :func:`safe_ref` calls. - - .. attribute:: weak_self - - weak reference to the target object - - .. attribute:: weak_fun - - weak reference to the target function - - .. attribute:: _all_instances - - class attribute pointing to all live - BoundMethodWeakref objects indexed by the class's - `calculate_key(target)` method applied to the target - objects. This weak value dictionary is used to - short-circuit creation so that multiple references - to the same (object, function) pair produce the - same BoundMethodWeakref instance. - - """ - - _all_instances = weakref.WeakValueDictionary() - - def __new__(cls, target, on_delete=None, *arguments, **named): - """Create new instance or return current instance - - Basically this method of construction allows us to - short-circuit creation of references to already- - referenced instance methods. The key corresponding - to the target is calculated, and if there is already - an existing reference, that is returned, with its - deletionMethods attribute updated. Otherwise the - new instance is created and registered in the table - of already-referenced methods. - - """ - key = cls.calculate_key(target) - current = cls._all_instances.get(key) - if current is not None: - current.deletion_methods.append(on_delete) - return current - else: - base = super(BoundMethodWeakref, cls).__new__(cls) - cls._all_instances[key] = base - base.__init__(target, on_delete, *arguments, **named) - return base - - def __init__(self, target, on_delete=None): - """Return a weak-reference-like instance for a bound method - - :param target: the instance-method target for the weak - reference, must have `__self__` and `__func__` attributes - and be reconstructable via:: - - target.__func__.__get__(target.__self__) - - which is true of built-in instance methods. - - :keyword on_delete: optional callback which will be called - when this weak reference ceases to be valid - (i.e. either the object or the function is garbage - collected). Should take a single argument, - which will be passed a pointer to this object. - - """ - def remove(weak, self=self): - """Set self.is_dead to true when method or instance is destroyed""" - methods = self.deletion_methods[:] - del(self.deletion_methods[:]) - try: - del(self.__class__._all_instances[self.key]) - except KeyError: - pass - for function in methods: - try: - if callable(function): - function(self) - except Exception as exc: - try: - traceback.print_exc() - except AttributeError: - print('Exception during saferef {0} cleanup function ' - '{1}: {2}'.format(self, function, exc)) - - self.deletion_methods = [on_delete] - self.key = self.calculate_key(target) - self.weak_self = weakref.ref(target.__self__, remove) - self.weak_fun = weakref.ref(target.__func__, remove) - self.self_name = str(target.__self__) - self.fun_name = str(target.__func__.__name__) - - def calculate_key(cls, target): - """Calculate the reference key for this reference - - Currently this is a two-tuple of the `id()`'s of the - target object and the target function respectively. 
- """ - return id(target.__self__), id(target.__func__) - calculate_key = classmethod(calculate_key) - - def __str__(self): - """Give a friendly representation of the object""" - return '{0}( {1}.{2} )'.format( - type(self).__name__, - self.self_name, - self.fun_name, - ) - - __repr__ = __str__ - - def __bool__(self): - """Whether we are still a valid reference""" - return self() is not None - __nonzero__ = __bool__ # py2 - - if not PY3: - def __cmp__(self, other): - """Compare with another reference""" - if not isinstance(other, self.__class__): - return cmp(self.__class__, type(other)) # noqa - return cmp(self.key, other.key) # noqa - - def __call__(self): - """Return a strong reference to the bound method - - If the target cannot be retrieved, then will - return None, otherwise return a bound instance - method for our object and function. - - Note: - You may call this method any number of times, - as it does not invalidate the reference. - """ - target = self.weak_self() - if target is not None: - function = self.weak_fun() - if function is not None: - return function.__get__(target) - - -class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover - """A specialized :class:`BoundMethodWeakref`, for platforms where - instance methods are not descriptors. - - It assumes that the function name and the target attribute name are the - same, instead of assuming that the function is a descriptor. This approach - is equally fast, but not 100% reliable because functions can be stored on - an attribute named differenty than the function's name such as in:: - - >>> class A(object): - ... pass - - >>> def foo(self): - ... return 'foo' - >>> A.bar = foo - - But this shouldn't be a common use case. So, on platforms where methods - aren't descriptors (such as Jython) this implementation has the advantage - of working in the most cases. - - """ - def __init__(self, target, on_delete=None): - """Return a weak-reference-like instance for a bound method - - :param target: the instance-method target for the weak - reference, must have `__self__` and `__func__` attributes - and be reconstructable via:: - - target.__func__.__get__(target.__self__) - - which is true of built-in instance methods. - - :keyword on_delete: optional callback which will be called - when this weak reference ceases to be valid - (i.e. either the object or the function is garbage - collected). Should take a single argument, - which will be passed a pointer to this object. - - """ - assert getattr(target.__self__, target.__name__) == target - super(BoundNonDescriptorMethodWeakref, self).__init__(target, - on_delete) - - def __call__(self): - """Return a strong reference to the bound method - - If the target cannot be retrieved, then will - return None, otherwise return a bound instance - method for our object and function. - - Note: - You may call this method any number of times, - as it does not invalidate the reference. - - """ - target = self.weak_self() - if target is not None: - function = self.weak_fun() - if function is not None: - # Using curry() would be another option, but it erases the - # "signature" of the function. That is, after a function is - # curried, the inspect module can't be used to determine how - # many arguments the function expects, nor what keyword - # arguments it supports, and pydispatcher needs this - # information. 
- return getattr(target, function.__name__) - - -def get_bound_method_weakref(target, on_delete): # pragma: no cover - """Instantiates the appropiate :class:`BoundMethodWeakRef`, depending - on the details of the underlying class method implementation.""" - if hasattr(target, '__get__'): - # target method is a descriptor, so the default implementation works: - return BoundMethodWeakref(target=target, on_delete=on_delete) - else: - # no luck, use the alternative implementation: - return BoundNonDescriptorMethodWeakref(target=target, - on_delete=on_delete) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py deleted file mode 100644 index 7d4b337..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py +++ /dev/null @@ -1,241 +0,0 @@ -# -*- coding: utf-8 -*- -"""Signal class.""" -from __future__ import absolute_import - -import weakref -from . import saferef - -from celery.five import range -from celery.local import PromiseProxy, Proxy - -__all__ = ['Signal'] - -WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) - - -def _make_id(target): # pragma: no cover - if isinstance(target, Proxy): - target = target._get_current_object() - if hasattr(target, '__func__'): - return (id(target.__self__), id(target.__func__)) - return id(target) - - -class Signal(object): # pragma: no cover - """Base class for all signals - - - .. attribute:: receivers - Internal attribute, holds a dictionary of - `{receiverkey (id): weakref(receiver)}` mappings. - - """ - - def __init__(self, providing_args=None): - """Create a new signal. - - :param providing_args: A list of the arguments this signal can pass - along in a :meth:`send` call. - - """ - self.receivers = [] - if providing_args is None: - providing_args = [] - self.providing_args = set(providing_args) - - def _connect_proxy(self, fun, sender, weak, dispatch_uid): - return self.connect( - fun, sender=sender._get_current_object(), - weak=weak, dispatch_uid=dispatch_uid, - ) - - def connect(self, *args, **kwargs): - """Connect receiver to sender for signal. - - :param receiver: A function or an instance method which is to - receive signals. Receivers must be hashable objects. - - if weak is :const:`True`, then receiver must be weak-referencable - (more precisely :func:`saferef.safe_ref()` must be able to create a - reference to the receiver). - - Receivers must be able to accept keyword arguments. - - If receivers have a `dispatch_uid` attribute, the receiver will - not be added if another receiver already exists with that - `dispatch_uid`. - - :keyword sender: The sender to which the receiver should respond. - Must either be of type :class:`Signal`, or :const:`None` to receive - events from any sender. - - :keyword weak: Whether to use weak references to the receiver. - By default, the module will attempt to use weak references to the - receiver objects. If this parameter is false, then strong - references will be used. - - :keyword dispatch_uid: An identifier used to uniquely identify a - particular instance of a receiver. This will usually be a - string, though it may be anything hashable. 
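
A connect/send round-trip under the semantics documented above would look like this; a sketch against the module as it existed before this deletion, with my_signal and receiver as hypothetical names:

    from celery.utils.dispatch.signal import Signal

    my_signal = Signal(providing_args=['value'])

    def receiver(sender=None, value=None, signal=None, **kwargs):
        return value * 2

    my_signal.connect(receiver)      # held by weak reference by default
    responses = my_signal.send(sender=None, value=21)
    assert responses == [(receiver, 42)]
    my_signal.disconnect(receiver)
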
- - """ - def _handle_options(sender=None, weak=True, dispatch_uid=None): - - def _connect_signal(fun): - receiver = fun - - if isinstance(sender, PromiseProxy): - sender.__then__( - self._connect_proxy, fun, sender, weak, dispatch_uid, - ) - return fun - - if dispatch_uid: - lookup_key = (dispatch_uid, _make_id(sender)) - else: - lookup_key = (_make_id(receiver), _make_id(sender)) - - if weak: - receiver = saferef.safe_ref( - receiver, on_delete=self._remove_receiver, - ) - - for r_key, _ in self.receivers: - if r_key == lookup_key: - break - else: - self.receivers.append((lookup_key, receiver)) - - return fun - - return _connect_signal - - if args and callable(args[0]): - return _handle_options(*args[1:], **kwargs)(args[0]) - return _handle_options(*args, **kwargs) - - def disconnect(self, receiver=None, sender=None, weak=True, - dispatch_uid=None): - """Disconnect receiver from sender for signal. - - If weak references are used, disconnect need not be called. The - receiver will be removed from dispatch automatically. - - :keyword receiver: The registered receiver to disconnect. May be - none if `dispatch_uid` is specified. - - :keyword sender: The registered sender to disconnect. - - :keyword weak: The weakref state to disconnect. - - :keyword dispatch_uid: the unique identifier of the receiver - to disconnect - - """ - if dispatch_uid: - lookup_key = (dispatch_uid, _make_id(sender)) - else: - lookup_key = (_make_id(receiver), _make_id(sender)) - - for index in range(len(self.receivers)): - (r_key, _) = self.receivers[index] - if r_key == lookup_key: - del self.receivers[index] - break - - def send(self, sender, **named): - """Send signal from sender to all connected receivers. - - If any receiver raises an error, the error propagates back through - send, terminating the dispatch loop, so it is quite possible to not - have all receivers called if a raises an error. - - :param sender: The sender of the signal. Either a specific - object or :const:`None`. - - :keyword \*\*named: Named arguments which will be passed to receivers. - - :returns: a list of tuple pairs: `[(receiver, response), … ]`. - - """ - responses = [] - if not self.receivers: - return responses - - for receiver in self._live_receivers(_make_id(sender)): - response = receiver(signal=self, sender=sender, **named) - responses.append((receiver, response)) - return responses - - def send_robust(self, sender, **named): - """Send signal from sender to all connected receivers catching errors. - - :param sender: The sender of the signal. Can be any python object - (normally one registered with a connect if you actually want - something to occur). - - :keyword \*\*named: Named arguments which will be passed to receivers. - These arguments must be a subset of the argument names defined in - :attr:`providing_args`. - - :returns: a list of tuple pairs: `[(receiver, response), … ]`. - - :raises DispatcherKeyError: - - if any receiver raises an error (specifically any subclass of - :exc:`Exception`), the error instance is returned as the result - for that receiver. - - """ - responses = [] - if not self.receivers: - return responses - - # Call each receiver with whatever arguments it can accept. - # Return a list of tuple pairs [(receiver, response), … ]. 
- for receiver in self._live_receivers(_make_id(sender)): - try: - response = receiver(signal=self, sender=sender, **named) - except Exception as err: - responses.append((receiver, err)) - else: - responses.append((receiver, response)) - return responses - - def _live_receivers(self, senderkey): - """Filter sequence of receivers to get resolved, live receivers. - - This checks for weak references and resolves them, then returning only - live receivers. - - """ - none_senderkey = _make_id(None) - receivers = [] - - for (receiverkey, r_senderkey), receiver in self.receivers: - if r_senderkey == none_senderkey or r_senderkey == senderkey: - if isinstance(receiver, WEAKREF_TYPES): - # Dereference the weak reference. - receiver = receiver() - if receiver is not None: - receivers.append(receiver) - else: - receivers.append(receiver) - return receivers - - def _remove_receiver(self, receiver): - """Remove dead receivers from connections.""" - - to_remove = [] - for key, connected_receiver in self.receivers: - if connected_receiver == receiver: - to_remove.append(key) - for key in to_remove: - for idx, (r_key, _) in enumerate(self.receivers): - if r_key == key: - del self.receivers[idx] - - def __repr__(self): - return ''.format(type(self).__name__) - - __str__ = __repr__ diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py b/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py deleted file mode 100644 index 3ddcd35..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.encoding - ~~~~~~~~~~~~~~~~~~~~~ - - This module has moved to :mod:`kombu.utils.encoding`. - -""" -from __future__ import absolute_import - -from kombu.utils.encoding import ( # noqa - default_encode, default_encoding, bytes_t, bytes_to_str, str_t, - str_to_bytes, ensure_bytes, from_utf8, safe_str, safe_repr, -) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py b/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py deleted file mode 100644 index e55b812..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py +++ /dev/null @@ -1,323 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.functional - ~~~~~~~~~~~~~~~~~~~~~~~ - - Utilities for functions. - -""" -from __future__ import absolute_import - -import sys -import threading - -from functools import wraps -from itertools import islice - -from kombu.utils import cached_property -from kombu.utils.functional import lazy, maybe_evaluate, is_list, maybe_list -from kombu.utils.compat import OrderedDict - -from celery.five import UserDict, UserList, items, keys, range - -__all__ = ['LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop', - 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq', - 'regen', 'dictfilter', 'lazy', 'maybe_evaluate'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -KEYWORD_MARK = object() - - -class DummyContext(object): - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - pass - - -class LRUCache(UserDict): - """LRU Cache implementation using a doubly linked list to track access. - - :keyword limit: The maximum number of keys to keep in the cache. - When a new key is inserted and the limit has been exceeded, - the *Least Recently Used* key will be discarded from the - cache. 
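
The eviction rule stated in the docstring above is easy to demonstrate; a doctest-style sketch, assuming the deleted module:

    >>> from celery.utils.functional import LRUCache
    >>> cache = LRUCache(limit=2)
    >>> cache['a'] = 1
    >>> cache['b'] = 2
    >>> cache['c'] = 3    # limit exceeded: the oldest key is evicted
    >>> sorted(cache.keys())
    ['b', 'c']
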
- - """ - - def __init__(self, limit=None): - self.limit = limit - self.mutex = threading.RLock() - self.data = OrderedDict() - - def __getitem__(self, key): - with self.mutex: - value = self[key] = self.data.pop(key) - return value - - def update(self, *args, **kwargs): - with self.mutex: - data, limit = self.data, self.limit - data.update(*args, **kwargs) - if limit and len(data) > limit: - # pop additional items in case limit exceeded - for _ in range(len(data) - limit): - data.popitem(last=False) - - def popitem(self, last=True): - with self.mutex: - return self.data.popitem(last) - - def __setitem__(self, key, value): - # remove least recently used key. - with self.mutex: - if self.limit and len(self.data) >= self.limit: - self.data.pop(next(iter(self.data))) - self.data[key] = value - - def __iter__(self): - return iter(self.data) - - def _iterate_items(self, _need_lock=IS_PYPY): - with self.mutex if _need_lock else DummyContext(): - for k in self: - try: - yield (k, self.data[k]) - except KeyError: # pragma: no cover - pass - iteritems = _iterate_items - - def _iterate_values(self, _need_lock=IS_PYPY): - with self.mutex if _need_lock else DummyContext(): - for k in self: - try: - yield self.data[k] - except KeyError: # pragma: no cover - pass - - itervalues = _iterate_values - - def _iterate_keys(self): - # userdict.keys in py3k calls __getitem__ - return keys(self.data) - iterkeys = _iterate_keys - - def incr(self, key, delta=1): - with self.mutex: - # this acts as memcached does- store as a string, but return a - # integer as long as it exists and we can cast it - newval = int(self.data.pop(key)) + delta - self[key] = str(newval) - return newval - - def __getstate__(self): - d = dict(vars(self)) - d.pop('mutex') - return d - - def __setstate__(self, state): - self.__dict__ = state - self.mutex = threading.RLock() - - if sys.version_info[0] == 3: # pragma: no cover - keys = _iterate_keys - values = _iterate_values - items = _iterate_items - else: # noqa - - def keys(self): - return list(self._iterate_keys()) - - def values(self): - return list(self._iterate_values()) - - def items(self): - return list(self._iterate_items()) - - -def memoize(maxsize=None, keyfun=None, Cache=LRUCache): - - def _memoize(fun): - mutex = threading.Lock() - cache = Cache(limit=maxsize) - - @wraps(fun) - def _M(*args, **kwargs): - if keyfun: - key = keyfun(args, kwargs) - else: - key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items())) - try: - with mutex: - value = cache[key] - except KeyError: - value = fun(*args, **kwargs) - _M.misses += 1 - with mutex: - cache[key] = value - else: - _M.hits += 1 - return value - - def clear(): - """Clear the cache and reset cache statistics.""" - cache.clear() - _M.hits = _M.misses = 0 - - _M.hits = _M.misses = 0 - _M.clear = clear - _M.original_func = fun - return _M - - return _memoize - - -class mlazy(lazy): - """Memoized lazy evaluation. - - The function is only evaluated once, every subsequent access - will return the same value. - - .. attribute:: evaluated - - Set to to :const:`True` after the object has been evaluated. - - """ - evaluated = False - _value = None - - def evaluate(self): - if not self.evaluated: - self._value = super(mlazy, self).evaluate() - self.evaluated = True - return self._value - - -def noop(*args, **kwargs): - """No operation. - - Takes any arguments/keyword arguments and does nothing. - - """ - pass - - -def first(predicate, it): - """Return the first element in `iterable` that `predicate` Gives a - :const:`True` value for. 
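
The memoize decorator removed above keeps hit/miss counters on the wrapper it returns. A sketch; fib is a hypothetical example function:

    from celery.utils.functional import memoize

    @memoize(maxsize=128)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(20)
    print(fib.hits, fib.misses)   # statistics kept by the wrapper
    fib.clear()                   # reset the cache and the counters
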
- - If `predicate` is None it will return the first item that is not None. - - """ - return next( - (v for v in it if (predicate(v) if predicate else v is not None)), - None, - ) - - -def firstmethod(method): - """Return a function that with a list of instances, - finds the first instance that gives a value for the given method. - - The list can also contain lazy instances - (:class:`~kombu.utils.functional.lazy`.) - - """ - - def _matcher(it, *args, **kwargs): - for obj in it: - try: - answer = getattr(maybe_evaluate(obj), method)(*args, **kwargs) - except AttributeError: - pass - else: - if answer is not None: - return answer - - return _matcher - - -def chunks(it, n): - """Split an iterator into chunks with `n` elements each. - - Examples - - # n == 2 - >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) - >>> list(x) - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] - - # n == 3 - >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) - >>> list(x) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] - - """ - # XXX This function is not used anymore, at least not by Celery itself. - for first in it: - yield [first] + list(islice(it, n - 1)) - - -def padlist(container, size, default=None): - """Pad list with default elements. - - Examples: - - >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3) - ('George', 'Costanza', 'NYC') - >>> first, last, city = padlist(['George', 'Costanza'], 3) - ('George', 'Costanza', None) - >>> first, last, city, planet = padlist( - ... ['George', 'Costanza', 'NYC'], 4, default='Earth', - ... ) - ('George', 'Costanza', 'NYC', 'Earth') - - """ - return list(container)[:size] + [default] * (size - len(container)) - - -def mattrgetter(*attrs): - """Like :func:`operator.itemgetter` but return :const:`None` on missing - attributes instead of raising :exc:`AttributeError`.""" - return lambda obj: dict((attr, getattr(obj, attr, None)) - for attr in attrs) - - -def uniq(it): - """Return all unique elements in ``it``, preserving order.""" - seen = set() - return (seen.add(obj) or obj for obj in it if obj not in seen) - - -def regen(it): - """Regen takes any iterable, and if the object is an - generator it will cache the evaluated list on first access, - so that the generator can be "consumed" multiple times.""" - if isinstance(it, (list, tuple)): - return it - return _regen(it) - - -class _regen(UserList, list): - # must be subclass of list so that json can encode. - def __init__(self, it): - self.__it = it - - def __reduce__(self): - return list, (self.data, ) - - def __length_hint__(self): - return self.__it.__length_hint__() - - @cached_property - def data(self): - return list(self.__it) - - -def dictfilter(d=None, **kw): - """Remove all keys from dict ``d`` whose value is :const:`None`""" - d = kw if d is None else (dict(d, **kw) if kw else d) - return dict((k, v) for k, v in items(d) if v is not None) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py b/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py deleted file mode 100644 index 22a2fdc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.import - ~~~~~~~~~~~~~~~~~~~ - - Utilities related to importing modules and symbols by name. 
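
For reference, a few of the small helpers deleted above in doctest form (outputs follow from the implementations shown):

    >>> from celery.utils.functional import dictfilter, padlist, uniq
    >>> dictfilter({'a': 1, 'b': None}, c=3)
    {'a': 1, 'c': 3}
    >>> padlist(['George'], 3)
    ['George', None, None]
    >>> list(uniq([1, 1, 2, 3, 2]))
    [1, 2, 3]
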
- -""" -from __future__ import absolute_import - -import imp as _imp -import importlib -import os -import sys - -from contextlib import contextmanager - -from kombu.utils import symbol_by_name - -from celery.five import reload - -__all__ = [ - 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', - 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', -] - - -class NotAPackage(Exception): - pass - - -if sys.version_info > (3, 3): # pragma: no cover - def qualname(obj): - if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): - obj = obj.__class__ - q = getattr(obj, '__qualname__', None) - if '.' not in q: - q = '.'.join((obj.__module__, q)) - return q -else: - def qualname(obj): # noqa - if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): - obj = obj.__class__ - return '.'.join((obj.__module__, obj.__name__)) - - -def instantiate(name, *args, **kwargs): - """Instantiate class by name. - - See :func:`symbol_by_name`. - - """ - return symbol_by_name(name)(*args, **kwargs) - - -@contextmanager -def cwd_in_path(): - cwd = os.getcwd() - if cwd in sys.path: - yield - else: - sys.path.insert(0, cwd) - try: - yield cwd - finally: - try: - sys.path.remove(cwd) - except ValueError: # pragma: no cover - pass - - -def find_module(module, path=None, imp=None): - """Version of :func:`imp.find_module` supporting dots.""" - if imp is None: - imp = importlib.import_module - with cwd_in_path(): - if '.' in module: - last = None - parts = module.split('.') - for i, part in enumerate(parts[:-1]): - mpart = imp('.'.join(parts[:i + 1])) - try: - path = mpart.__path__ - except AttributeError: - raise NotAPackage(module) - last = _imp.find_module(parts[i + 1], path) - return last - return _imp.find_module(module) - - -def import_from_cwd(module, imp=None, package=None): - """Import module, but make sure it finds modules - located in the current directory. - - Modules located in the current directory has - precedence over modules located in `sys.path`. 
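
instantiate and qualname compose with kombu's symbol_by_name; a sketch using a stdlib class as the target:

    from celery.utils.imports import instantiate, qualname

    obj = instantiate('collections.OrderedDict')   # resolve dotted path, call it
    assert qualname(obj) == 'collections.OrderedDict'
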
-    """
-    if imp is None:
-        imp = importlib.import_module
-    with cwd_in_path():
-        return imp(module, package=package)
-
-
-def reload_from_cwd(module, reloader=None):
-    if reloader is None:
-        reloader = reload
-    with cwd_in_path():
-        return reloader(module)
-
-
-def module_file(module):
-    """Return the correct original file name of a module."""
-    name = module.__file__
-    return name[:-1] if name.endswith('.pyc') else name
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py b/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py
deleted file mode 100644
index c951cf6..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Originally taken from pyiso8601 (http://code.google.com/p/pyiso8601/)
-
-Modified to match the behavior of dateutil.parser:
-
- - raise ValueError instead of ParseError
- - return naive datetimes by default
- - uses pytz.FixedOffset
-
-This is the original License:
-
-Copyright (c) 2007 Michael Twomey
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-"""
-from __future__ import absolute_import
-
-import re
-
-from datetime import datetime
-from pytz import FixedOffset
-
-__all__ = ['parse_iso8601']
-
-# Adapted from http://delete.me.uk/2005/03/iso8601.html
-ISO8601_REGEX = re.compile(
-    r'(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})'
-    r'((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})'
-    '(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?'
-    r'(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?'
-)
-TIMEZONE_REGEX = re.compile(
-    '(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})'
-)
-
-
-def parse_iso8601(datestring):
-    """Parse and convert ISO 8601 string into a datetime object"""
-    m = ISO8601_REGEX.match(datestring)
-    if not m:
-        raise ValueError('unable to parse date string %r' % datestring)
-    groups = m.groupdict()
-    tz = groups['timezone']
-    if tz == 'Z':
-        tz = FixedOffset(0)
-    elif tz:
-        m = TIMEZONE_REGEX.match(tz)
-        prefix, hours, minutes = m.groups()
-        hours, minutes = int(hours), int(minutes)
-        if prefix == '-':
-            hours = -hours
-            minutes = -minutes
-        tz = FixedOffset(minutes + hours * 60)
-    frac = groups['fraction'] or 0
-    return datetime(
-        int(groups['year']), int(groups['month']), int(groups['day']),
-        int(groups['hour']), int(groups['minute']), int(groups['second']),
-        int(frac), tz
-    )
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/log.py b/thesisenv/lib/python3.6/site-packages/celery/utils/log.py
deleted file mode 100644
index b786d39..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/utils/log.py
+++ /dev/null
@@ -1,301 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.utils.log
-    ~~~~~~~~~~~~~~~~
-
-    Logging utilities.
-
-"""
-from __future__ import absolute_import, print_function
-
-import logging
-import numbers
-import os
-import sys
-import threading
-import traceback
-
-from contextlib import contextmanager
-from billiard import current_process, util as mputil
-from kombu.five import values
-from kombu.log import get_logger as _get_logger, LOG_LEVELS
-from kombu.utils.encoding import safe_str
-
-from celery.five import string_t, text_t
-
-from .term import colored
-
-__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
-           'set_in_sighandler', 'in_sighandler', 'get_logger',
-           'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
-           'get_multiprocessing_logger', 'reset_multiprocessing_logger']
-
-_process_aware = False
-PY3 = sys.version_info[0] == 3
-
-MP_LOG = os.environ.get('MP_LOG', False)
-
-
-# Sets up our logging hierarchy.
-#
-# Every logger in the celery package inherits from the "celery"
-# logger, and every task logger inherits from the "celery.task"
-# logger.
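
Before moving on: the parse_iso8601 helper removed above returns an aware datetime whenever the string carries an offset. A sketch:

    from celery.utils.iso8601 import parse_iso8601

    dt = parse_iso8601('2018-10-30T11:23:07+02:00')
    # datetime(2018, 10, 30, 11, 23, 7, tzinfo=pytz.FixedOffset(120))
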
-base_logger = logger = _get_logger('celery') -mp_logger = _get_logger('multiprocessing') - -_in_sighandler = False - - -def set_in_sighandler(value): - global _in_sighandler - _in_sighandler = value - - -def iter_open_logger_fds(): - seen = set() - loggers = (list(values(logging.Logger.manager.loggerDict)) + - [logging.getLogger(None)]) - for logger in loggers: - try: - for handler in logger.handlers: - try: - if handler not in seen: - yield handler.stream - seen.add(handler) - except AttributeError: - pass - except AttributeError: # PlaceHolder does not have handlers - pass - - -@contextmanager -def in_sighandler(): - set_in_sighandler(True) - try: - yield - finally: - set_in_sighandler(False) - - -def logger_isa(l, p, max=1000): - this, seen = l, set() - for _ in range(max): - if this == p: - return True - else: - if this in seen: - raise RuntimeError( - 'Logger {0!r} parents recursive'.format(l), - ) - seen.add(this) - this = this.parent - if not this: - break - else: - raise RuntimeError('Logger hierarchy exceeds {0}'.format(max)) - return False - - -def get_logger(name): - l = _get_logger(name) - if logging.root not in (l, l.parent) and l is not base_logger: - if not logger_isa(l, base_logger): - l.parent = base_logger - return l -task_logger = get_logger('celery.task') -worker_logger = get_logger('celery.worker') - - -def get_task_logger(name): - logger = get_logger(name) - if not logger_isa(logger, task_logger): - logger.parent = task_logger - return logger - - -def mlevel(level): - if level and not isinstance(level, numbers.Integral): - return LOG_LEVELS[level.upper()] - return level - - -class ColorFormatter(logging.Formatter): - #: Loglevel -> Color mapping. - COLORS = colored().names - colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'], - 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']} - - def __init__(self, fmt=None, use_color=True): - logging.Formatter.__init__(self, fmt) - self.use_color = use_color - - def formatException(self, ei): - if ei and not isinstance(ei, tuple): - ei = sys.exc_info() - r = logging.Formatter.formatException(self, ei) - if isinstance(r, str) and not PY3: - return safe_str(r) - return r - - def format(self, record): - msg = logging.Formatter.format(self, record) - color = self.colors.get(record.levelname) - - # reset exception info later for other handlers... - einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info - - if color and self.use_color: - try: - # safe_str will repr the color object - # and color will break on non-string objects - # so need to reorder calls based on type. - # Issue #427 - try: - if isinstance(msg, string_t): - return text_t(color(safe_str(msg))) - return safe_str(color(msg)) - except UnicodeDecodeError: - return safe_str(msg) # skip colors - except Exception as exc: - prev_msg, record.exc_info, record.msg = ( - record.msg, 1, ''.format( - type(msg), exc - ), - ) - try: - return logging.Formatter.format(self, record) - finally: - record.msg, record.exc_info = prev_msg, einfo - else: - return safe_str(msg) - - -class LoggingProxy(object): - """Forward file object to :class:`logging.Logger` instance. - - :param logger: The :class:`logging.Logger` instance to forward to. - :param loglevel: Loglevel to use when writing messages. 
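
get_task_logger defined in this file is the usual entry point, and it parents every logger it returns under "celery.task"; this part of the API also exists outside the vendored copy:

    from celery.utils.log import get_task_logger

    logger = get_task_logger(__name__)
    logger.info('task-scoped message')   # propagates through 'celery.task'
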
- - """ - mode = 'w' - name = None - closed = False - loglevel = logging.ERROR - _thread = threading.local() - - def __init__(self, logger, loglevel=None): - self.logger = logger - self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) - self._safewrap_handlers() - - def _safewrap_handlers(self): - """Make the logger handlers dump internal errors to - `sys.__stderr__` instead of `sys.stderr` to circumvent - infinite loops.""" - - def wrap_handler(handler): # pragma: no cover - - class WithSafeHandleError(logging.Handler): - - def handleError(self, record): - exc_info = sys.exc_info() - try: - try: - traceback.print_exception(exc_info[0], - exc_info[1], - exc_info[2], - None, sys.__stderr__) - except IOError: - pass # see python issue 5971 - finally: - del(exc_info) - - handler.handleError = WithSafeHandleError().handleError - return [wrap_handler(h) for h in self.logger.handlers] - - def write(self, data): - """Write message to logging object.""" - if _in_sighandler: - return print(safe_str(data), file=sys.__stderr__) - if getattr(self._thread, 'recurse_protection', False): - # Logger is logging back to this file, so stop recursing. - return - data = data.strip() - if data and not self.closed: - self._thread.recurse_protection = True - try: - self.logger.log(self.loglevel, safe_str(data)) - finally: - self._thread.recurse_protection = False - - def writelines(self, sequence): - """`writelines(sequence_of_strings) -> None`. - - Write the strings to the file. - - The sequence can be any iterable object producing strings. - This is equivalent to calling :meth:`write` for each string. - - """ - for part in sequence: - self.write(part) - - def flush(self): - """This object is not buffered so any :meth:`flush` requests - are ignored.""" - pass - - def close(self): - """When the object is closed, no write requests are forwarded to - the logging object anymore.""" - self.closed = True - - def isatty(self): - """Always return :const:`False`. 
Just here for file support.""" - return False - - -def ensure_process_aware_logger(force=False): - """Make sure process name is recorded when loggers are used.""" - global _process_aware - if force or not _process_aware: - logging._acquireLock() - try: - _process_aware = True - Logger = logging.getLoggerClass() - if getattr(Logger, '_process_aware', False): # pragma: no cover - return - - class ProcessAwareLogger(Logger): - _signal_safe = True - _process_aware = True - - def makeRecord(self, *args, **kwds): - record = Logger.makeRecord(self, *args, **kwds) - record.processName = current_process()._name - return record - - def log(self, *args, **kwargs): - if _in_sighandler: - return - return Logger.log(self, *args, **kwargs) - logging.setLoggerClass(ProcessAwareLogger) - finally: - logging._releaseLock() - - -def get_multiprocessing_logger(): - return mputil.get_logger() if mputil else None - - -def reset_multiprocessing_logger(): - if mputil and hasattr(mputil, '_logger'): - mputil._logger = None - - -def current_process_index(base=1): - if current_process: - index = getattr(current_process(), 'index', None) - return index + base if index is not None else index -ensure_process_aware_logger() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py b/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py deleted file mode 100644 index 00c5f29..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.mail - ~~~~~~~~~~~~~~~~~ - - How task error emails are formatted and sent. - -""" -from __future__ import absolute_import - -import smtplib -import socket -import traceback -import warnings - -from email.mime.text import MIMEText - -from .functional import maybe_list - -try: - from ssl import SSLError -except ImportError: # pragma: no cover - class SSLError(Exception): # noqa - """fallback used when ssl module not compiled.""" - -__all__ = ['SendmailWarning', 'Message', 'Mailer', 'ErrorMail'] - -_local_hostname = None - - -def get_local_hostname(): - global _local_hostname - if _local_hostname is None: - _local_hostname = socket.getfqdn() - return _local_hostname - - -class SendmailWarning(UserWarning): - """Problem happened while sending the email message.""" - - -class Message(object): - - def __init__(self, to=None, sender=None, subject=None, - body=None, charset='us-ascii'): - self.to = maybe_list(to) - self.sender = sender - self.subject = subject - self.body = body - self.charset = charset - - def __repr__(self): - return ''.format(self) - - def __str__(self): - msg = MIMEText(self.body, 'plain', self.charset) - msg['Subject'] = self.subject - msg['From'] = self.sender - msg['To'] = ', '.join(self.to) - return msg.as_string() - - -class Mailer(object): - - def __init__(self, host='localhost', port=0, user=None, password=None, - timeout=2, use_ssl=False, use_tls=False): - self.host = host - self.port = port - self.user = user - self.password = password - self.timeout = timeout - self.use_ssl = use_ssl - self.use_tls = use_tls - - def send(self, message, fail_silently=False, **kwargs): - try: - self._send(message, **kwargs) - except Exception as exc: - if not fail_silently: - raise - warnings.warn(SendmailWarning( - 'Mail could not be sent: {0!r} {1!r}\n{2!r}'.format( - exc, {'To': ', '.join(message.to), - 'Subject': message.subject}, - traceback.format_stack()))) - - def _send(self, message, **kwargs): - Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP - client = 
Client(self.host, self.port, timeout=self.timeout, - local_hostname=get_local_hostname(), **kwargs) - - if self.use_tls: - client.ehlo() - client.starttls() - client.ehlo() - - if self.user and self.password: - client.login(self.user, self.password) - - client.sendmail(message.sender, message.to, str(message)) - try: - client.quit() - except SSLError: - client.close() - - -class ErrorMail(object): - """Defines how and when task error e-mails should be sent. - - :param task: The task instance that raised the error. - - :attr:`subject` and :attr:`body` are format strings which - are passed a context containing the following keys: - - * name - - Name of the task. - - * id - - UUID of the task. - - * exc - - String representation of the exception. - - * args - - Positional arguments. - - * kwargs - - Keyword arguments. - - * traceback - - String representation of the traceback. - - * hostname - - Worker nodename. - - """ - - # pep8.py borks on a inline signature separator and - # says "trailing whitespace" ;) - EMAIL_SIGNATURE_SEP = '-- ' - - #: Format string used to generate error email subjects. - subject = """\ - [{hostname}] Error: Task {name} ({id}): {exc!r} - """ - - #: Format string used to generate error email content. - body = """ -Task {{name}} with id {{id}} raised exception:\n{{exc!r}} - - -Task was called with args: {{args}} kwargs: {{kwargs}}. - -The contents of the full traceback was: - -{{traceback}} - -{EMAIL_SIGNATURE_SEP} -Just to let you know, -py-celery at {{hostname}}. -""".format(EMAIL_SIGNATURE_SEP=EMAIL_SIGNATURE_SEP) - - def __init__(self, task, **kwargs): - self.task = task - self.subject = kwargs.get('subject', self.subject) - self.body = kwargs.get('body', self.body) - - def should_send(self, context, exc): - """Return true or false depending on if a task error mail - should be sent for this type of error.""" - return True - - def format_subject(self, context): - return self.subject.strip().format(**context) - - def format_body(self, context): - return self.body.strip().format(**context) - - def send(self, context, exc, fail_silently=True): - if self.should_send(context, exc): - self.task.app.mail_admins(self.format_subject(context), - self.format_body(context), - fail_silently=fail_silently) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py b/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py deleted file mode 100644 index 8a2f7f6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.objects - ~~~~~~~~~~~~~~~~~~~~ - - Object related utilities including introspection, etc. - -""" -from __future__ import absolute_import - -__all__ = ['mro_lookup'] - - -class Bunch(object): - """Object that enables you to modify attributes.""" - - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - -def mro_lookup(cls, attr, stop=(), monkey_patched=[]): - """Return the first node by MRO order that defines an attribute. - - :keyword stop: A list of types that if reached will stop the search. - :keyword monkey_patched: Use one of the stop classes if the attr's - module origin is not in this list, this to detect monkey patched - attributes. - - :returns None: if the attribute was not found. 
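
The Mailer/Message pair deleted above can be driven directly; a sketch with hypothetical addresses (with fail_silently=True a failed SMTP connection only emits a warning):

    from celery.utils.mail import Mailer, Message

    message = Message(
        to=['ops@example.com'], sender='celery@example.com',
        subject='task failed', body='traceback ...',
    )
    Mailer(host='localhost', port=25).send(message, fail_silently=True)
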
- - """ - for node in cls.mro(): - if node in stop: - try: - attr = node.__dict__[attr] - module_origin = attr.__module__ - except (AttributeError, KeyError): - pass - else: - if module_origin not in monkey_patched: - return node - return - if attr in node.__dict__: - return node - - -class FallbackContext(object): - """The built-in ``@contextmanager`` utility does not work well - when wrapping other contexts, as the traceback is wrong when - the wrapped context raises. - - This solves this problem and can be used instead of ``@contextmanager`` - in this example:: - - @contextmanager - def connection_or_default_connection(connection=None): - if connection: - # user already has a connection, should not close - # after use - yield connection - else: - # must have new connection, and also close the connection - # after the block returns - with create_new_connection() as connection: - yield connection - - This wrapper can be used instead for the above like this:: - - def connection_or_default_connection(connection=None): - return FallbackContext(connection, create_new_connection) - - """ - - def __init__(self, provided, fallback, *fb_args, **fb_kwargs): - self.provided = provided - self.fallback = fallback - self.fb_args = fb_args - self.fb_kwargs = fb_kwargs - self._context = None - - def __enter__(self): - if self.provided is not None: - return self.provided - context = self._context = self.fallback( - *self.fb_args, **self.fb_kwargs - ).__enter__() - return context - - def __exit__(self, *exc_info): - if self._context is not None: - return self._context.__exit__(*exc_info) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py deleted file mode 100644 index d5509f1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.serialization - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Utilities for safely pickling exceptions. - -""" -from __future__ import absolute_import - -from inspect import getmro -from itertools import takewhile - -try: - import cPickle as pickle -except ImportError: - import pickle # noqa - -from .encoding import safe_repr - -__all__ = ['UnpickleableExceptionWrapper', 'subclass_exception', - 'find_pickleable_exception', 'create_exception_cls', - 'get_pickleable_exception', 'get_pickleable_etype', - 'get_pickled_exception'] - -#: List of base classes we probably don't want to reduce to. -try: - unwanted_base_classes = (StandardError, Exception, BaseException, object) -except NameError: # pragma: no cover - unwanted_base_classes = (Exception, BaseException, object) # py3k - - -def subclass_exception(name, parent, module): # noqa - return type(name, (parent, ), {'__module__': module}) - - -def find_pickleable_exception(exc, loads=pickle.loads, - dumps=pickle.dumps): - """With an exception instance, iterate over its super classes (by mro) - and find the first super exception that is pickleable. It does - not go below :exc:`Exception` (i.e. it skips :exc:`Exception`, - :class:`BaseException` and :class:`object`). If that happens - you should use :exc:`UnpickleableException` instead. - - :param exc: An exception instance. - - Will return the nearest pickleable parent exception class - (except :exc:`Exception` and parents), or if the exception is - pickleable it will return :const:`None`. 
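
Together these helpers guard result serialization: anything that cannot survive a pickle round-trip gets wrapped. A sketch using a deliberately unpicklable exception argument:

    import pickle
    import threading

    from celery.utils.serialization import (
        UnpickleableExceptionWrapper, get_pickleable_exception,
    )

    exc = RuntimeError(threading.Lock())   # lock objects cannot be pickled
    safe = get_pickleable_exception(exc)
    assert isinstance(safe, UnpickleableExceptionWrapper)
    pickle.dumps(safe)                     # now round-trips
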
- - :rtype :exc:`Exception`: - - """ - exc_args = getattr(exc, 'args', []) - for supercls in itermro(exc.__class__, unwanted_base_classes): - try: - superexc = supercls(*exc_args) - loads(dumps(superexc)) - except: - pass - else: - return superexc -find_nearest_pickleable_exception = find_pickleable_exception # XXX compat - - -def itermro(cls, stop): - return takewhile(lambda sup: sup not in stop, getmro(cls)) - - -def create_exception_cls(name, module, parent=None): - """Dynamically create an exception class.""" - if not parent: - parent = Exception - return subclass_exception(name, parent, module) - - -class UnpickleableExceptionWrapper(Exception): - """Wraps unpickleable exceptions. - - :param exc_module: see :attr:`exc_module`. - :param exc_cls_name: see :attr:`exc_cls_name`. - :param exc_args: see :attr:`exc_args` - - **Example** - - .. code-block:: python - - >>> def pickle_it(raising_function): - ... try: - ... raising_function() - ... except Exception as e: - ... exc = UnpickleableExceptionWrapper( - ... e.__class__.__module__, - ... e.__class__.__name__, - ... e.args, - ... ) - ... pickle.dumps(exc) # Works fine. - - """ - - #: The module of the original exception. - exc_module = None - - #: The name of the original exception class. - exc_cls_name = None - - #: The arguments for the original exception. - exc_args = None - - def __init__(self, exc_module, exc_cls_name, exc_args, text=None): - safe_exc_args = [] - for arg in exc_args: - try: - pickle.dumps(arg) - safe_exc_args.append(arg) - except Exception: - safe_exc_args.append(safe_repr(arg)) - self.exc_module = exc_module - self.exc_cls_name = exc_cls_name - self.exc_args = safe_exc_args - self.text = text - Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text) - - def restore(self): - return create_exception_cls(self.exc_cls_name, - self.exc_module)(*self.exc_args) - - def __str__(self): - return self.text - - @classmethod - def from_exception(cls, exc): - return cls(exc.__class__.__module__, - exc.__class__.__name__, - getattr(exc, 'args', []), - safe_repr(exc)) - - -def get_pickleable_exception(exc): - """Make sure exception is pickleable.""" - try: - pickle.loads(pickle.dumps(exc)) - except Exception: - pass - else: - return exc - nearest = find_pickleable_exception(exc) - if nearest: - return nearest - return UnpickleableExceptionWrapper.from_exception(exc) - - -def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): - try: - loads(dumps(cls)) - except: - return Exception - else: - return cls - - -def get_pickled_exception(exc): - """Get original exception from exception pickled using - :meth:`get_pickleable_exception`.""" - if isinstance(exc, UnpickleableExceptionWrapper): - return exc.restore() - return exc diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py deleted file mode 100644 index 65073a6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import os - -from math import ceil - -from kombu.utils import cached_property - -__all__ = ['load_average', 'df'] - - -if hasattr(os, 'getloadavg'): - - def load_average(): - return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg()) - -else: # pragma: no cover - # Windows doesn't have getloadavg - def load_average(): # noqa - return (0.0, 0.0, 0.0) - - -class df(object): - - def __init__(self, path): - self.path = path - - @property - 
def total_blocks(self): - return self.stat.f_blocks * self.stat.f_frsize / 1024 - - @property - def available(self): - return self.stat.f_bavail * self.stat.f_frsize / 1024 - - @property - def capacity(self): - avail = self.stat.f_bavail - used = self.stat.f_blocks - self.stat.f_bfree - return int(ceil(used * 100.0 / (used + avail) + 0.5)) - - @cached_property - def stat(self): - return os.statvfs(os.path.abspath(self.path)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/term.py b/thesisenv/lib/python3.6/site-packages/celery/utils/term.py deleted file mode 100644 index 430c695..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/term.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.term - ~~~~~~~~~~~~~~~~~ - - Terminals and colors. - -""" -from __future__ import absolute_import, unicode_literals - -import platform - -from functools import reduce - -from kombu.utils.encoding import safe_str -from celery.five import string - -__all__ = ['colored'] - -IS_WINDOWS = platform.system() == 'Windows' - -BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) -OP_SEQ = '\033[%dm' -RESET_SEQ = '\033[0m' -COLOR_SEQ = '\033[1;%dm' - - -def fg(s): - return COLOR_SEQ % s - - -class colored(object): - """Terminal colored text. - - Example:: - >>> c = colored(enabled=True) - >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')), - ... c.magenta(c.underline('jumps over')), - ... c.yellow(' the lazy '), - ... c.green('dog '))) - - """ - - def __init__(self, *s, **kwargs): - self.s = s - self.enabled = not IS_WINDOWS and kwargs.get('enabled', True) - self.op = kwargs.get('op', '') - self.names = {'black': self.black, - 'red': self.red, - 'green': self.green, - 'yellow': self.yellow, - 'blue': self.blue, - 'magenta': self.magenta, - 'cyan': self.cyan, - 'white': self.white} - - def _add(self, a, b): - return string(a) + string(b) - - def _fold_no_color(self, a, b): - try: - A = a.no_color() - except AttributeError: - A = string(a) - try: - B = b.no_color() - except AttributeError: - B = string(b) - - return ''.join((string(A), string(B))) - - def no_color(self): - if self.s: - return string(reduce(self._fold_no_color, self.s)) - return '' - - def embed(self): - prefix = '' - if self.enabled: - prefix = self.op - return ''.join((string(prefix), string(reduce(self._add, self.s)))) - - def __unicode__(self): - suffix = '' - if self.enabled: - suffix = RESET_SEQ - return string(''.join((self.embed(), string(suffix)))) - - def __str__(self): - return safe_str(self.__unicode__()) - - def node(self, s, op): - return self.__class__(enabled=self.enabled, op=op, *s) - - def black(self, *s): - return self.node(s, fg(30 + BLACK)) - - def red(self, *s): - return self.node(s, fg(30 + RED)) - - def green(self, *s): - return self.node(s, fg(30 + GREEN)) - - def yellow(self, *s): - return self.node(s, fg(30 + YELLOW)) - - def blue(self, *s): - return self.node(s, fg(30 + BLUE)) - - def magenta(self, *s): - return self.node(s, fg(30 + MAGENTA)) - - def cyan(self, *s): - return self.node(s, fg(30 + CYAN)) - - def white(self, *s): - return self.node(s, fg(30 + WHITE)) - - def __repr__(self): - return repr(self.no_color()) - - def bold(self, *s): - return self.node(s, OP_SEQ % 1) - - def underline(self, *s): - return self.node(s, OP_SEQ % 4) - - def blink(self, *s): - return self.node(s, OP_SEQ % 5) - - def reverse(self, *s): - return self.node(s, OP_SEQ % 7) - - def bright(self, *s): - return self.node(s, OP_SEQ % 8) - - def ired(self, *s): - 
return self.node(s, fg(40 + RED)) - - def igreen(self, *s): - return self.node(s, fg(40 + GREEN)) - - def iyellow(self, *s): - return self.node(s, fg(40 + YELLOW)) - - def iblue(self, *s): - return self.node(s, fg(40 + BLUE)) - - def imagenta(self, *s): - return self.node(s, fg(40 + MAGENTA)) - - def icyan(self, *s): - return self.node(s, fg(40 + CYAN)) - - def iwhite(self, *s): - return self.node(s, fg(40 + WHITE)) - - def reset(self, *s): - return self.node(s or [''], RESET_SEQ) - - def __add__(self, other): - return string(self) + string(other) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/text.py b/thesisenv/lib/python3.6/site-packages/celery/utils/text.py deleted file mode 100644 index ffd2d72..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/text.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.text - ~~~~~~~~~~~~~~~~~ - - Text formatting utilities - -""" -from __future__ import absolute_import - -from textwrap import fill - -from pprint import pformat - -__all__ = ['dedent_initial', 'dedent', 'fill_paragraphs', 'join', - 'ensure_2lines', 'abbr', 'abbrtask', 'indent', 'truncate', - 'pluralize', 'pretty'] - - -def dedent_initial(s, n=4): - return s[n:] if s[:n] == ' ' * n else s - - -def dedent(s, n=4, sep='\n'): - return sep.join(dedent_initial(l) for l in s.splitlines()) - - -def fill_paragraphs(s, width, sep='\n'): - return sep.join(fill(p, width) for p in s.split(sep)) - - -def join(l, sep='\n'): - return sep.join(v for v in l if v) - - -def ensure_2lines(s, sep='\n'): - if len(s.splitlines()) <= 2: - return s + sep - return s - - -def abbr(S, max, ellipsis='...'): - if S is None: - return '???' - if len(S) > max: - return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max] - return S - - -def abbrtask(S, max): - if S is None: - return '???' - if len(S) > max: - module, _, cls = S.rpartition('.') - module = abbr(module, max - len(cls) - 3, False) - return module + '[.]' + cls - return S - - -def indent(t, indent=0, sep='\n'): - """Indent text.""" - return sep.join(' ' * indent + p for p in t.split(sep)) - - -def truncate(text, maxlen=128, suffix='...'): - """Truncates text to a maximum number of characters.""" - if len(text) >= maxlen: - return text[:maxlen].rsplit(' ', 1)[0] + suffix - return text - - -def pluralize(n, text, suffix='s'): - if n > 1: - return text + suffix - return text - - -def pretty(value, width=80, nl_width=80, sep='\n', **kw): - if isinstance(value, dict): - return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:]) - elif isinstance(value, tuple): - return '{0}{1}{2}'.format( - sep, ' ' * 4, pformat(value, width=nl_width, **kw), - ) - else: - return pformat(value, width=width, **kw) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py b/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py deleted file mode 100644 index 5d42373..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py +++ /dev/null @@ -1,329 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.threads - ~~~~~~~~~~~~~~~~~~~~ - - Threading utilities. 
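
Two of the text helpers above in doctest form:

    >>> from celery.utils.text import abbr, pluralize
    >>> abbr('supercalifragilistic', 10)
    'superca...'
    >>> pluralize(3, 'worker')
    'workers'
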
- -""" -from __future__ import absolute_import, print_function - -import os -import socket -import sys -import threading -import traceback - -from contextlib import contextmanager - -from celery.local import Proxy -from celery.five import THREAD_TIMEOUT_MAX, items - -__all__ = ['bgThread', 'Local', 'LocalStack', 'LocalManager', - 'get_ident', 'default_socket_timeout'] - -USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') -PY3 = sys.version_info[0] == 3 - - -@contextmanager -def default_socket_timeout(timeout): - prev = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - yield - socket.setdefaulttimeout(prev) - - -class bgThread(threading.Thread): - - def __init__(self, name=None, **kwargs): - super(bgThread, self).__init__() - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.daemon = True - self.name = name or self.__class__.__name__ - - def body(self): - raise NotImplementedError('subclass responsibility') - - def on_crash(self, msg, *fmt, **kwargs): - print(msg.format(*fmt), file=sys.stderr) - exc_info = sys.exc_info() - try: - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, sys.stderr) - finally: - del(exc_info) - - def run(self): - body = self.body - shutdown_set = self._is_shutdown.is_set - try: - while not shutdown_set(): - try: - body() - except Exception as exc: - try: - self.on_crash('{0!r} crashed: {1!r}', self.name, exc) - self._set_stopped() - finally: - os._exit(1) # exiting by normal means won't work - finally: - self._set_stopped() - - def _set_stopped(self): - try: - self._is_stopped.set() - except TypeError: # pragma: no cover - # we lost the race at interpreter shutdown, - # so gc collected built-in modules. - pass - - def stop(self): - """Graceful shutdown.""" - self._is_shutdown.set() - self._is_stopped.wait() - if self.is_alive(): - self.join(THREAD_TIMEOUT_MAX) - -try: - from greenlet import getcurrent as get_ident -except ImportError: # pragma: no cover - try: - from _thread import get_ident # noqa - except ImportError: - try: - from thread import get_ident # noqa - except ImportError: # pragma: no cover - try: - from _dummy_thread import get_ident # noqa - except ImportError: - from dummy_thread import get_ident # noqa - - -def release_local(local): - """Releases the contents of the local for the current context. - This makes it possible to use locals without a manager. - - Example:: - - >>> loc = Local() - >>> loc.foo = 42 - >>> release_local(loc) - >>> hasattr(loc, 'foo') - False - - With this function one can release :class:`Local` objects as well - as :class:`StackLocal` objects. However it is not possible to - release data held by proxies that way, one always has to retain - a reference to the underlying local object in order to be able - to release it. - - .. 
versionadded:: 0.6.1 - """ - local.__release_local__() - - -class Local(object): - __slots__ = ('__storage__', '__ident_func__') - - def __init__(self): - object.__setattr__(self, '__storage__', {}) - object.__setattr__(self, '__ident_func__', get_ident) - - def __iter__(self): - return iter(items(self.__storage__)) - - def __call__(self, proxy): - """Create a proxy for a name.""" - return Proxy(self, proxy) - - def __release_local__(self): - self.__storage__.pop(self.__ident_func__(), None) - - def __getattr__(self, name): - try: - return self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - ident = self.__ident_func__() - storage = self.__storage__ - try: - storage[ident][name] = value - except KeyError: - storage[ident] = {name: value} - - def __delattr__(self, name): - try: - del self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - -class _LocalStack(object): - """This class works similar to a :class:`Local` but keeps a stack - of objects instead. This is best explained with an example:: - - >>> ls = LocalStack() - >>> ls.push(42) - >>> ls.top - 42 - >>> ls.push(23) - >>> ls.top - 23 - >>> ls.pop() - 23 - >>> ls.top - 42 - - They can be force released by using a :class:`LocalManager` or with - the :func:`release_local` function but the correct way is to pop the - item from the stack after using. When the stack is empty it will - no longer be bound to the current context (and as such released). - - By calling the stack without arguments it will return a proxy that - resolves to the topmost item on the stack. - - """ - - def __init__(self): - self._local = Local() - - def __release_local__(self): - self._local.__release_local__() - - def _get__ident_func__(self): - return self._local.__ident_func__ - - def _set__ident_func__(self, value): - object.__setattr__(self._local, '__ident_func__', value) - __ident_func__ = property(_get__ident_func__, _set__ident_func__) - del _get__ident_func__, _set__ident_func__ - - def __call__(self): - def _lookup(): - rv = self.top - if rv is None: - raise RuntimeError('object unbound') - return rv - return Proxy(_lookup) - - def push(self, obj): - """Pushes a new item to the stack""" - rv = getattr(self._local, 'stack', None) - if rv is None: - self._local.stack = rv = [] - rv.append(obj) - return rv - - def pop(self): - """Remove the topmost item from the stack, will return the - old value or `None` if the stack was already empty. - """ - stack = getattr(self._local, 'stack', None) - if stack is None: - return None - elif len(stack) == 1: - release_local(self._local) - return stack[-1] - else: - return stack.pop() - - def __len__(self): - stack = getattr(self._local, 'stack', None) - return len(stack) if stack else 0 - - @property - def stack(self): - """get_current_worker_task uses this to find - the original task that was executed by the worker.""" - stack = getattr(self._local, 'stack', None) - if stack is not None: - return stack - return [] - - @property - def top(self): - """The topmost item on the stack. If the stack is empty, - `None` is returned. - """ - try: - return self._local.stack[-1] - except (AttributeError, IndexError): - return None - - -class LocalManager(object): - """Local objects cannot manage themselves. For that you need a local - manager. You can pass a local manager multiple locals or add them - later by appending them to `manager.locals`. 
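
The stack semantics described above in practice; a sketch against the deleted module:

    from celery.utils.threads import LocalStack

    stack = LocalStack()
    stack.push('request-1')
    assert stack.top == 'request-1'
    assert stack.pop() == 'request-1'
    assert stack.top is None    # empty again, released from the context
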
Everytime the manager - cleans up it, will clean up all the data left in the locals for this - context. - - The `ident_func` parameter can be added to override the default ident - function for the wrapped locals. - - """ - - def __init__(self, locals=None, ident_func=None): - if locals is None: - self.locals = [] - elif isinstance(locals, Local): - self.locals = [locals] - else: - self.locals = list(locals) - if ident_func is not None: - self.ident_func = ident_func - for local in self.locals: - object.__setattr__(local, '__ident_func__', ident_func) - else: - self.ident_func = get_ident - - def get_ident(self): - """Return the context identifier the local objects use internally - for this context. You cannot override this method to change the - behavior but use it to link other context local objects (such as - SQLAlchemy's scoped sessions) to the Werkzeug locals.""" - return self.ident_func() - - def cleanup(self): - """Manually clean up the data in the locals for this context. - - Call this at the end of the request or use `make_middleware()`. - - """ - for local in self.locals: - release_local(local) - - def __repr__(self): - return '<{0} storages: {1}>'.format( - self.__class__.__name__, len(self.locals)) - - -class _FastLocalStack(threading.local): - - def __init__(self): - self.stack = [] - self.push = self.stack.append - self.pop = self.stack.pop - - @property - def top(self): - try: - return self.stack[-1] - except (AttributeError, IndexError): - return None - - def __len__(self): - return len(self.stack) - -if USE_FAST_LOCALS: # pragma: no cover - LocalStack = _FastLocalStack -else: - # - See #706 - # since each thread has its own greenlet we can just use those as - # identifiers for the context. If greenlets are not available we - # fall back to the current thread ident. - LocalStack = _LocalStack # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py deleted file mode 100644 index e42660c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -""" - timer2 - ~~~~~~ - - Scheduler for Python functions. 
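
And the manager-driven cleanup just described:

    from celery.utils.threads import Local, LocalManager

    loc = Local()
    manager = LocalManager([loc])
    loc.user = 'alice'
    manager.cleanup()           # releases this context's data in every local
    assert not hasattr(loc, 'user')
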
- -""" -from __future__ import absolute_import - -import os -import sys -import threading - -from itertools import count -from time import sleep - -from celery.five import THREAD_TIMEOUT_MAX -from kombu.async.timer import Entry, Timer as Schedule, to_timestamp, logger - -TIMER_DEBUG = os.environ.get('TIMER_DEBUG') - -__all__ = ['Entry', 'Schedule', 'Timer', 'to_timestamp'] - - -class Timer(threading.Thread): - Entry = Entry - Schedule = Schedule - - running = False - on_tick = None - _timer_count = count(1) - - if TIMER_DEBUG: # pragma: no cover - def start(self, *args, **kwargs): - import traceback - print('- Timer starting') - traceback.print_stack() - super(Timer, self).start(*args, **kwargs) - - def __init__(self, schedule=None, on_error=None, on_tick=None, - on_start=None, max_interval=None, **kwargs): - self.schedule = schedule or self.Schedule(on_error=on_error, - max_interval=max_interval) - self.on_start = on_start - self.on_tick = on_tick or self.on_tick - threading.Thread.__init__(self) - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.mutex = threading.Lock() - self.not_empty = threading.Condition(self.mutex) - self.daemon = True - self.name = 'Timer-{0}'.format(next(self._timer_count)) - - def _next_entry(self): - with self.not_empty: - delay, entry = next(self.scheduler) - if entry is None: - if delay is None: - self.not_empty.wait(1.0) - return delay - return self.schedule.apply_entry(entry) - __next__ = next = _next_entry # for 2to3 - - def run(self): - try: - self.running = True - self.scheduler = iter(self.schedule) - - while not self._is_shutdown.isSet(): - delay = self._next_entry() - if delay: - if self.on_tick: - self.on_tick(delay) - if sleep is None: # pragma: no cover - break - sleep(delay) - try: - self._is_stopped.set() - except TypeError: # pragma: no cover - # we lost the race at interpreter shutdown, - # so gc collected built-in modules. 
- pass - except Exception as exc: - logger.error('Thread Timer crashed: %r', exc, exc_info=True) - os._exit(1) - - def stop(self): - self._is_shutdown.set() - if self.running: - self._is_stopped.wait() - self.join(THREAD_TIMEOUT_MAX) - self.running = False - - def ensure_started(self): - if not self.running and not self.isAlive(): - if self.on_start: - self.on_start(self) - self.start() - - def _do_enter(self, meth, *args, **kwargs): - self.ensure_started() - with self.mutex: - entry = getattr(self.schedule, meth)(*args, **kwargs) - self.not_empty.notify() - return entry - - def enter(self, entry, eta, priority=None): - return self._do_enter('enter_at', entry, eta, priority=priority) - - def call_at(self, *args, **kwargs): - return self._do_enter('call_at', *args, **kwargs) - - def enter_after(self, *args, **kwargs): - return self._do_enter('enter_after', *args, **kwargs) - - def call_after(self, *args, **kwargs): - return self._do_enter('call_after', *args, **kwargs) - - def call_repeatedly(self, *args, **kwargs): - return self._do_enter('call_repeatedly', *args, **kwargs) - - def exit_after(self, secs, priority=10): - self.call_after(secs, sys.exit, priority) - - def cancel(self, tref): - tref.cancel() - - def clear(self): - self.schedule.clear() - - def empty(self): - return not len(self) - - def __len__(self): - return len(self.schedule) - - def __bool__(self): - return True - __nonzero__ = __bool__ - - @property - def queue(self): - return self.schedule.queue diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py deleted file mode 100644 index 6dab703..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.timeutils - ~~~~~~~~~~~~~~~~~~~~~~ - - This module contains various utilities related to dates and times. - -""" -from __future__ import absolute_import - -import numbers -import os -import sys -import time as _time - -from calendar import monthrange -from datetime import date, datetime, timedelta, tzinfo - -from kombu.utils import cached_property, reprcall -from kombu.utils.compat import timedelta_seconds - -from pytz import timezone as _timezone, AmbiguousTimeError, FixedOffset - -from celery.five import string_t - -from .functional import dictfilter -from .iso8601 import parse_iso8601 -from .text import pluralize - -__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta', 'timedelta_seconds', - 'delta_resolution', 'remaining', 'rate', 'weekday', - 'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware', - 'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset', - 'adjust_timestamp', 'maybe_s_to_ms'] - -PY3 = sys.version_info[0] == 3 -PY33 = sys.version_info >= (3, 3) - -C_REMDEBUG = os.environ.get('C_REMDEBUG', False) - -DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' -WEEKDAYS = dict(zip(DAYNAMES, range(7))) - -RATE_MODIFIER_MAP = {'s': lambda n: n, - 'm': lambda n: n / 60.0, - 'h': lambda n: n / 60.0 / 60.0} - -TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')), - ('hour', 60 * 60.0, lambda n: format(n, '.2f')), - ('minute', 60.0, lambda n: format(n, '.2f')), - ('second', 1.0, lambda n: format(n, '.2f'))) - -ZERO = timedelta(0) - -_local_timezone = None - - -class LocalTimezone(tzinfo): - """Local time implementation taken from Python's docs. - - Used only when UTC is not enabled. 
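A short sketch of the conversion helpers this module provided; the import path assumes the vendored celery 3.1 tree removed in this diff, and make_aware, localize, to_utc and the module-level timezone (_Zone) instance behave as defined further below:

# Sketch of the deleted timeutils helpers (vendored celery 3.1 layout).
from datetime import datetime
from celery.utils.timeutils import make_aware, localize, to_utc, timezone

naive = datetime(2018, 10, 30, 12, 23)        # no tzinfo attached
aware = make_aware(naive, timezone.utc)       # attach UTC (pytz localize if available)
local = localize(aware, timezone.local)       # convert to the system timezone
assert to_utc(naive).tzinfo is not None       # to_utc == make_aware(..., timezone.utc)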
- """ - _offset_cache = {} - - def __init__(self): - # This code is moved in __init__ to execute it as late as possible - # See get_default_timezone(). - self.STDOFFSET = timedelta(seconds=-_time.timezone) - if _time.daylight: - self.DSTOFFSET = timedelta(seconds=-_time.altzone) - else: - self.DSTOFFSET = self.STDOFFSET - self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET - tzinfo.__init__(self) - - def __repr__(self): - return ''.format( - int(timedelta_seconds(self.DSTOFFSET) / 3600), - ) - - def utcoffset(self, dt): - return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET - - def dst(self, dt): - return self.DSTDIFF if self._isdst(dt) else ZERO - - def tzname(self, dt): - return _time.tzname[self._isdst(dt)] - - if PY3: - - def fromutc(self, dt): - # The base tzinfo class no longer implements a DST - # offset aware .fromutc() in Python3 (Issue #2306). - - # I'd rather rely on pytz to do this, than port - # the C code from cpython's fromutc [asksol] - offset = int(self.utcoffset(dt).seconds / 60.0) - try: - tz = self._offset_cache[offset] - except KeyError: - tz = self._offset_cache[offset] = FixedOffset(offset) - return tz.fromutc(dt.replace(tzinfo=tz)) - - def _isdst(self, dt): - tt = (dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, - dt.weekday(), 0, 0) - stamp = _time.mktime(tt) - tt = _time.localtime(stamp) - return tt.tm_isdst > 0 - - -class _Zone(object): - - def tz_or_local(self, tzinfo=None): - if tzinfo is None: - return self.local - return self.get_timezone(tzinfo) - - def to_local(self, dt, local=None, orig=None): - if is_naive(dt): - dt = make_aware(dt, orig or self.utc) - return localize(dt, self.tz_or_local(local)) - - if PY33: - - def to_system(self, dt): - # tz=None is a special case since Python 3.3, and will - # convert to the current local timezone (Issue #2306). - return dt.astimezone(tz=None) - - else: - - def to_system(self, dt): # noqa - return localize(dt, self.local) - - def to_local_fallback(self, dt): - if is_naive(dt): - return make_aware(dt, self.local) - return localize(dt, self.local) - - def get_timezone(self, zone): - if isinstance(zone, string_t): - return _timezone(zone) - return zone - - @cached_property - def local(self): - return LocalTimezone() - - @cached_property - def utc(self): - return self.get_timezone('UTC') -timezone = _Zone() - - -def maybe_timedelta(delta): - """Coerces integer to timedelta if `delta` is an integer.""" - if isinstance(delta, numbers.Real): - return timedelta(seconds=delta) - return delta - - -def delta_resolution(dt, delta): - """Round a datetime to the resolution of a timedelta. - - If the timedelta is in days, the datetime will be rounded - to the nearest days, if the timedelta is in hours the datetime - will be rounded to the nearest hour, and so on until seconds - which will just return the original datetime. - - """ - delta = timedelta_seconds(delta) - - resolutions = ((3, lambda x: x / 86400), - (4, lambda x: x / 3600), - (5, lambda x: x / 60)) - - args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second - for res, predicate in resolutions: - if predicate(delta) >= 1.0: - return datetime(*args[:res], tzinfo=dt.tzinfo) - return dt - - -def remaining(start, ends_in, now=None, relative=False): - """Calculate the remaining time for a start date and a timedelta. - - e.g. "how many seconds left for 30 seconds after start?" - - :param start: Start :class:`~datetime.datetime`. - :param ends_in: The end delta as a :class:`~datetime.timedelta`. 
- :keyword relative: If enabled the end time will be - calculated using :func:`delta_resolution` (i.e. rounded to the - resolution of `ends_in`). - :keyword now: Function returning the current time and date, - defaults to :func:`datetime.utcnow`. - - """ - now = now or datetime.utcnow() - end_date = start + ends_in - if relative: - end_date = delta_resolution(end_date, ends_in) - ret = end_date - now - if C_REMDEBUG: # pragma: no cover - print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % ( - now, start, ends_in, end_date, ret)) - return ret - - -def rate(rate): - """Parse rate strings, such as `"100/m"`, `"2/h"` or `"0.5/s"` - and convert them to seconds.""" - if rate: - if isinstance(rate, string_t): - ops, _, modifier = rate.partition('/') - return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0 - return rate or 0 - return 0 - - -def weekday(name): - """Return the position of a weekday (0 - 7, where 0 is Sunday). - - Example:: - - >>> weekday('sunday'), weekday('sun'), weekday('mon') - (0, 0, 1) - - """ - abbreviation = name[0:3].lower() - try: - return WEEKDAYS[abbreviation] - except KeyError: - # Show original day name in exception, instead of abbr. - raise KeyError(name) - - -def humanize_seconds(secs, prefix='', sep='', now='now'): - """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2 - hours". - - :keyword prefix: Can be used to add a preposition to the output, - e.g. 'in' will give 'in 1 second', but add nothing to 'now'. - - """ - secs = float(secs) - for unit, divider, formatter in TIME_UNITS: - if secs >= divider: - w = secs / divider - return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w), - pluralize(w, unit)) - return now - - -def maybe_iso8601(dt): - """`Either datetime | str -> datetime or None -> None`""" - if not dt: - return - if isinstance(dt, datetime): - return dt - return parse_iso8601(dt) - - -def is_naive(dt): - """Return :const:`True` if the datetime is naive - (does not have timezone information).""" - return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None - - -def make_aware(dt, tz): - """Sets the timezone for a datetime object.""" - try: - _localize = tz.localize - except AttributeError: - return dt.replace(tzinfo=tz) - else: - # works on pytz timezones - try: - return _localize(dt, is_dst=None) - except AmbiguousTimeError: - return min(_localize(dt, is_dst=True), - _localize(dt, is_dst=False)) - - -def localize(dt, tz): - """Convert aware datetime to another timezone.""" - dt = dt.astimezone(tz) - try: - _normalize = tz.normalize - except AttributeError: # non-pytz tz - return dt - else: - try: - return _normalize(dt, is_dst=None) - except TypeError: - return _normalize(dt) - except AmbiguousTimeError: - return min(_normalize(dt, is_dst=True), - _normalize(dt, is_dst=False)) - - -def to_utc(dt): - """Converts naive datetime to UTC""" - return make_aware(dt, timezone.utc) - - -def maybe_make_aware(dt, tz=None): - if is_naive(dt): - dt = to_utc(dt) - return localize( - dt, timezone.utc if tz is None else timezone.tz_or_local(tz), - ) - - -class ffwd(object): - """Version of relativedelta that only supports addition.""" - - def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, - hour=None, minute=None, second=None, microsecond=None, - **kwargs): - self.year = year - self.month = month - self.weeks = weeks - self.weekday = weekday - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - self.days = weeks * 7 - self._has_time = self.hour is not None or 
self.minute is not None - - def __repr__(self): - return reprcall('ffwd', (), self._fields(weeks=self.weeks, - weekday=self.weekday)) - - def __radd__(self, other): - if not isinstance(other, date): - return NotImplemented - year = self.year or other.year - month = self.month or other.month - day = min(monthrange(year, month)[1], self.day or other.day) - ret = other.replace(**dict(dictfilter(self._fields()), - year=year, month=month, day=day)) - if self.weekday is not None: - ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) - return ret + timedelta(days=self.days) - - def _fields(self, **extra): - return dictfilter({ - 'year': self.year, 'month': self.month, 'day': self.day, - 'hour': self.hour, 'minute': self.minute, - 'second': self.second, 'microsecond': self.microsecond, - }, **extra) - - -def utcoffset(time=_time, localtime=_time.localtime): - if localtime().tm_isdst: - return time.altzone // 3600 - return time.timezone // 3600 - - -def adjust_timestamp(ts, offset, here=utcoffset): - return ts - (offset - here()) * 3600 - - -def maybe_s_to_ms(v): - return int(float(v) * 1000.0) if v is not None else v diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py deleted file mode 100644 index 3d65dd1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker - ~~~~~~~~~~~~~ - - :class:`WorkController` can be used to instantiate in-process workers. - - The worker consists of several components, all managed by bootsteps - (mod:`celery.bootsteps`). - -""" -from __future__ import absolute_import - -import os -import sys -import traceback -try: - import resource -except ImportError: # pragma: no cover - resource = None # noqa - -from billiard import cpu_count -from billiard.util import Finalize -from kombu.syn import detect_environment - -from celery import bootsteps -from celery.bootsteps import RUN, TERMINATE -from celery import concurrency as _concurrency -from celery import platforms -from celery import signals -from celery.exceptions import ( - ImproperlyConfigured, WorkerTerminate, TaskRevokedError, -) -from celery.five import string_t, values -from celery.utils import default_nodename, worker_direct -from celery.utils.imports import reload_from_cwd -from celery.utils.log import mlevel, worker_logger as logger -from celery.utils.threads import default_socket_timeout - -from . import state - -__all__ = ['WorkController', 'default_nodename'] - -#: Default socket timeout at shutdown. -SHUTDOWN_SOCKET_TIMEOUT = 5.0 - -SELECT_UNKNOWN_QUEUE = """\ -Trying to select queue subset of {0!r}, but queue {1} is not -defined in the CELERY_QUEUES setting. - -If you want to automatically declare unknown queues you can -enable the CELERY_CREATE_MISSING_QUEUES setting. -""" - -DESELECT_UNKNOWN_QUEUE = """\ -Trying to deselect queue subset of {0!r}, but queue {1} is not -defined in the CELERY_QUEUES setting. 
-""" - - -def str_to_list(s): - if isinstance(s, string_t): - return s.split(',') - return s - - -class WorkController(object): - """Unmanaged worker instance.""" - app = None - - pidlock = None - blueprint = None - pool = None - semaphore = None - - class Blueprint(bootsteps.Blueprint): - """Worker bootstep blueprint.""" - name = 'Worker' - default_steps = set([ - 'celery.worker.components:Hub', - 'celery.worker.components:Queues', - 'celery.worker.components:Pool', - 'celery.worker.components:Beat', - 'celery.worker.components:Timer', - 'celery.worker.components:StateDB', - 'celery.worker.components:Consumer', - 'celery.worker.autoscale:WorkerComponent', - 'celery.worker.autoreload:WorkerComponent', - - ]) - - def __init__(self, app=None, hostname=None, **kwargs): - self.app = app or self.app - self.hostname = default_nodename(hostname) - self.app.loader.init_worker() - self.on_before_init(**kwargs) - self.setup_defaults(**kwargs) - self.on_after_init(**kwargs) - - self.setup_instance(**self.prepare_args(**kwargs)) - self._finalize = [ - Finalize(self, self._send_worker_shutdown, exitpriority=10), - ] - - def setup_instance(self, queues=None, ready_callback=None, pidfile=None, - include=None, use_eventloop=None, exclude_queues=None, - **kwargs): - self.pidfile = pidfile - self.setup_queues(queues, exclude_queues) - self.setup_includes(str_to_list(include)) - - # Set default concurrency - if not self.concurrency: - try: - self.concurrency = cpu_count() - except NotImplementedError: - self.concurrency = 2 - - # Options - self.loglevel = mlevel(self.loglevel) - self.ready_callback = ready_callback or self.on_consumer_ready - - # this connection is not established, only used for params - self._conninfo = self.app.connection() - self.use_eventloop = ( - self.should_use_eventloop() if use_eventloop is None - else use_eventloop - ) - self.options = kwargs - - signals.worker_init.send(sender=self) - - # Initialize bootsteps - self.pool_cls = _concurrency.get_implementation(self.pool_cls) - self.steps = [] - self.on_init_blueprint() - self.blueprint = self.Blueprint(app=self.app, - on_start=self.on_start, - on_close=self.on_close, - on_stopped=self.on_stopped) - self.blueprint.apply(self, **kwargs) - - def on_init_blueprint(self): - pass - - def on_before_init(self, **kwargs): - pass - - def on_after_init(self, **kwargs): - pass - - def on_start(self): - if self.pidfile: - self.pidlock = platforms.create_pidlock(self.pidfile) - - def on_consumer_ready(self, consumer): - pass - - def on_close(self): - self.app.loader.shutdown_worker() - - def on_stopped(self): - self.timer.stop() - self.consumer.shutdown() - - if self.pidlock: - self.pidlock.release() - - def setup_queues(self, include, exclude=None): - include = str_to_list(include) - exclude = str_to_list(exclude) - try: - self.app.amqp.queues.select(include) - except KeyError as exc: - raise ImproperlyConfigured( - SELECT_UNKNOWN_QUEUE.format(include, exc)) - try: - self.app.amqp.queues.deselect(exclude) - except KeyError as exc: - raise ImproperlyConfigured( - DESELECT_UNKNOWN_QUEUE.format(exclude, exc)) - if self.app.conf.CELERY_WORKER_DIRECT: - self.app.amqp.queues.select_add(worker_direct(self.hostname)) - - def setup_includes(self, includes): - # Update celery_include to have all known task modules, so that we - # ensure all task modules are imported in case an execv happens. 
- prev = tuple(self.app.conf.CELERY_INCLUDE) - if includes: - prev += tuple(includes) - [self.app.loader.import_task_module(m) for m in includes] - self.include = includes - task_modules = set(task.__class__.__module__ - for task in values(self.app.tasks)) - self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules) - - def prepare_args(self, **kwargs): - return kwargs - - def _send_worker_shutdown(self): - signals.worker_shutdown.send(sender=self) - - def start(self): - """Starts the workers main loop.""" - try: - self.blueprint.start(self) - except WorkerTerminate: - self.terminate() - except Exception as exc: - logger.error('Unrecoverable error: %r', exc, exc_info=True) - self.stop() - except (KeyboardInterrupt, SystemExit): - self.stop() - - def register_with_event_loop(self, hub): - self.blueprint.send_all( - self, 'register_with_event_loop', args=(hub, ), - description='hub.register', - ) - - def _process_task_sem(self, req): - return self._quick_acquire(self._process_task, req) - - def _process_task(self, req): - """Process task by sending it to the pool of workers.""" - try: - req.execute_using_pool(self.pool) - except TaskRevokedError: - try: - self._quick_release() # Issue 877 - except AttributeError: - pass - except Exception as exc: - logger.critical('Internal error: %r\n%s', - exc, traceback.format_exc(), exc_info=True) - - def signal_consumer_close(self): - try: - self.consumer.close() - except AttributeError: - pass - - def should_use_eventloop(self): - return (detect_environment() == 'default' and - self._conninfo.is_evented and not self.app.IS_WINDOWS) - - def stop(self, in_sighandler=False): - """Graceful shutdown of the worker server.""" - if self.blueprint.state == RUN: - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=True) - - def terminate(self, in_sighandler=False): - """Not so graceful shutdown of the worker server.""" - if self.blueprint.state != TERMINATE: - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=False) - - def _shutdown(self, warm=True): - # if blueprint does not exist it means that we had an - # error before the bootsteps could be initialized. 
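The start()/stop() pair above is the lifecycle an embedding application drives directly. A hedged sketch of running a worker in-process (assumes celery 3.1's app.WorkController accessor and a reachable broker; 'amqp://' is a placeholder URL):

# Hedged sketch: in-process worker via WorkController (celery 3.1 API).
# pool_cls='solo' runs tasks in the calling process, avoiding forking.
from celery import Celery

app = Celery('demo', broker='amqp://')        # placeholder broker URL
worker = app.WorkController(pool_cls='solo', loglevel='INFO')
try:
    worker.start()     # blocks: runs the bootstep blueprint until shutdown
except KeyboardInterrupt:
    worker.stop()      # warm (graceful) shutdown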
- if self.blueprint is not None: - with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT): # Issue 975 - self.blueprint.stop(self, terminate=not warm) - self.blueprint.join() - - def reload(self, modules=None, reload=False, reloader=None): - modules = self.app.loader.task_modules if modules is None else modules - imp = self.app.loader.import_from_cwd - - for module in set(modules or ()): - if module not in sys.modules: - logger.debug('importing module %s', module) - imp(module) - elif reload: - logger.debug('reloading module %s', module) - reload_from_cwd(sys.modules[module], reloader) - - if self.consumer: - self.consumer.update_strategies() - self.consumer.reset_rate_limits() - try: - self.pool.restart() - except NotImplementedError: - pass - - def info(self): - return {'total': self.state.total_count, - 'pid': os.getpid(), - 'clock': str(self.app.clock)} - - def rusage(self): - if resource is None: - raise NotImplementedError('rusage not supported by this platform') - s = resource.getrusage(resource.RUSAGE_SELF) - return { - 'utime': s.ru_utime, - 'stime': s.ru_stime, - 'maxrss': s.ru_maxrss, - 'ixrss': s.ru_ixrss, - 'idrss': s.ru_idrss, - 'isrss': s.ru_isrss, - 'minflt': s.ru_minflt, - 'majflt': s.ru_majflt, - 'nswap': s.ru_nswap, - 'inblock': s.ru_inblock, - 'oublock': s.ru_oublock, - 'msgsnd': s.ru_msgsnd, - 'msgrcv': s.ru_msgrcv, - 'nsignals': s.ru_nsignals, - 'nvcsw': s.ru_nvcsw, - 'nivcsw': s.ru_nivcsw, - } - - def stats(self): - info = self.info() - info.update(self.blueprint.info(self)) - info.update(self.consumer.blueprint.info(self.consumer)) - try: - info['rusage'] = self.rusage() - except NotImplementedError: - info['rusage'] = 'N/A' - return info - - def __repr__(self): - return '<Worker: {self.hostname} ({state})>'.format( - self=self, - state=(self.blueprint.human_state() - if self.blueprint else 'initializing'), # Issue #2514 - ) - - def __str__(self): - return self.hostname - - @property - def state(self): - return state - - def setup_defaults(self, concurrency=None, loglevel=None, logfile=None, - send_events=None, pool_cls=None, consumer_cls=None, - timer_cls=None, timer_precision=None, - autoscaler_cls=None, autoreloader_cls=None, - pool_putlocks=None, pool_restarts=None, - force_execv=None, state_db=None, - schedule_filename=None, scheduler_cls=None, - task_time_limit=None, task_soft_time_limit=None, - max_tasks_per_child=None, prefetch_multiplier=None, - disable_rate_limits=None, worker_lost_wait=None, **_kw): - self.concurrency = self._getopt('concurrency', concurrency) - self.loglevel = self._getopt('log_level', loglevel) - self.logfile = self._getopt('log_file', logfile) - self.send_events = self._getopt('send_events', send_events) - self.pool_cls = self._getopt('pool', pool_cls) - self.consumer_cls = self._getopt('consumer', consumer_cls) - self.timer_cls = self._getopt('timer', timer_cls) - self.timer_precision = self._getopt('timer_precision', timer_precision) - self.autoscaler_cls = self._getopt('autoscaler', autoscaler_cls) - self.autoreloader_cls = self._getopt('autoreloader', autoreloader_cls) - self.pool_putlocks = self._getopt('pool_putlocks', pool_putlocks) - self.pool_restarts = self._getopt('pool_restarts', pool_restarts) - self.force_execv = self._getopt('force_execv', force_execv) - self.state_db = self._getopt('state_db', state_db) - self.schedule_filename = self._getopt( - 'schedule_filename', schedule_filename, - ) - self.scheduler_cls = self._getopt( - 'celerybeat_scheduler', scheduler_cls, - ) - self.task_time_limit = self._getopt( - 'task_time_limit', task_time_limit, - ) -
self.task_soft_time_limit = self._getopt( - 'task_soft_time_limit', task_soft_time_limit, - ) - self.max_tasks_per_child = self._getopt( - 'max_tasks_per_child', max_tasks_per_child, - ) - self.prefetch_multiplier = int(self._getopt( - 'prefetch_multiplier', prefetch_multiplier, - )) - self.disable_rate_limits = self._getopt( - 'disable_rate_limits', disable_rate_limits, - ) - self.worker_lost_wait = self._getopt( - 'worker_lost_wait', worker_lost_wait, - ) - - def _getopt(self, key, value): - if value is not None: - return value - return self.app.conf.find_value_for_key(key, namespace='celeryd') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py deleted file mode 100644 index 8ade32f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.autoreload - ~~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements automatic module reloading -""" -from __future__ import absolute_import - -import hashlib -import os -import select -import sys -import time - -from collections import defaultdict -from threading import Event - -from kombu.utils import eventio -from kombu.utils.encoding import ensure_bytes - -from celery import bootsteps -from celery.five import items -from celery.platforms import ignore_errno -from celery.utils.imports import module_file -from celery.utils.log import get_logger -from celery.utils.threads import bgThread - -from .components import Pool - -try: # pragma: no cover - import pyinotify - _ProcessEvent = pyinotify.ProcessEvent -except ImportError: # pragma: no cover - pyinotify = None # noqa - _ProcessEvent = object # noqa - -__all__ = [ - 'WorkerComponent', 'Autoreloader', 'Monitor', 'BaseMonitor', - 'StatMonitor', 'KQueueMonitor', 'InotifyMonitor', 'file_hash', -] - -logger = get_logger(__name__) - - -class WorkerComponent(bootsteps.StartStopStep): - label = 'Autoreloader' - conditional = True - requires = (Pool, ) - - def __init__(self, w, autoreload=None, **kwargs): - self.enabled = w.autoreload = autoreload - w.autoreloader = None - - def create(self, w): - w.autoreloader = self.instantiate(w.autoreloader_cls, w) - return w.autoreloader if not w.use_eventloop else None - - def register_with_event_loop(self, w, hub): - w.autoreloader.register_with_event_loop(hub) - hub.on_close.add(w.autoreloader.on_event_loop_close) - - -def file_hash(filename, algorithm='md5'): - hobj = hashlib.new(algorithm) - with open(filename, 'rb') as f: - for chunk in iter(lambda: f.read(2 ** 20), ''): - hobj.update(ensure_bytes(chunk)) - return hobj.digest() - - -class BaseMonitor(object): - - def __init__(self, files, - on_change=None, shutdown_event=None, interval=0.5): - self.files = files - self.interval = interval - self._on_change = on_change - self.modify_times = defaultdict(int) - self.shutdown_event = shutdown_event or Event() - - def start(self): - raise NotImplementedError('Subclass responsibility') - - def stop(self): - pass - - def on_change(self, modified): - if self._on_change: - return self._on_change(modified) - - def on_event_loop_close(self, hub): - pass - - -class StatMonitor(BaseMonitor): - """File change monitor based on the ``stat`` system call.""" - - def _mtimes(self): - return ((f, self._mtime(f)) for f in self.files) - - def _maybe_modified(self, f, mt): - return mt is not None and self.modify_times[f] != mt - - def register_with_event_loop(self, hub): - hub.call_repeatedly(2.0, 
self.find_changes) - - def find_changes(self): - maybe_modified = self._maybe_modified - modified = dict((f, mt) for f, mt in self._mtimes() - if maybe_modified(f, mt)) - if modified: - self.on_change(modified) - self.modify_times.update(modified) - - def start(self): - while not self.shutdown_event.is_set(): - self.find_changes() - time.sleep(self.interval) - - @staticmethod - def _mtime(path): - try: - return os.stat(path).st_mtime - except Exception: - pass - - -class KQueueMonitor(BaseMonitor): - """File change monitor based on BSD kernel event notifications""" - - def __init__(self, *args, **kwargs): - super(KQueueMonitor, self).__init__(*args, **kwargs) - self.filemap = dict((f, None) for f in self.files) - self.fdmap = {} - - def register_with_event_loop(self, hub): - if eventio.kqueue is not None: - self._kq = eventio._kqueue() - self.add_events(self._kq) - self._kq.on_file_change = self.handle_event - hub.add_reader(self._kq._kqueue, self._kq.poll, 0) - - def on_event_loop_close(self, hub): - self.close(self._kq) - - def add_events(self, poller): - for f in self.filemap: - self.filemap[f] = fd = os.open(f, os.O_RDONLY) - self.fdmap[fd] = f - poller.watch_file(fd) - - def handle_event(self, events): - self.on_change([self.fdmap[e.ident] for e in events]) - - def start(self): - self.poller = eventio.poll() - self.add_events(self.poller) - self.poller.on_file_change = self.handle_event - while not self.shutdown_event.is_set(): - self.poller.poll(1) - - def close(self, poller): - for f, fd in items(self.filemap): - if fd is not None: - poller.unregister(fd) - with ignore_errno('EBADF'): # pragma: no cover - os.close(fd) - self.filemap.clear() - self.fdmap.clear() - - def stop(self): - self.close(self.poller) - self.poller.close() - - -class InotifyMonitor(_ProcessEvent): - """File change monitor based on Linux kernel `inotify` subsystem""" - - def __init__(self, modules, on_change=None, **kwargs): - assert pyinotify - self._modules = modules - self._on_change = on_change - self._wm = None - self._notifier = None - - def register_with_event_loop(self, hub): - self.create_notifier() - hub.add_reader(self._wm.get_fd(), self.on_readable) - - def on_event_loop_close(self, hub): - pass - - def on_readable(self): - self._notifier.read_events() - self._notifier.process_events() - - def create_notifier(self): - self._wm = pyinotify.WatchManager() - self._notifier = pyinotify.Notifier(self._wm, self) - add_watch = self._wm.add_watch - flags = pyinotify.IN_MODIFY | pyinotify.IN_ATTRIB - for m in self._modules: - add_watch(m, flags) - - def start(self): - try: - self.create_notifier() - self._notifier.loop() - finally: - if self._wm: - self._wm.close() - # Notifier.close is called at the end of Notifier.loop - self._wm = self._notifier = None - - def stop(self): - pass - - def process_(self, event): - self.on_change([event.path]) - - process_IN_ATTRIB = process_IN_MODIFY = process_ - - def on_change(self, modified): - if self._on_change: - return self._on_change(modified) - - -def default_implementation(): - if hasattr(select, 'kqueue') and eventio.kqueue is not None: - return 'kqueue' - elif sys.platform.startswith('linux') and pyinotify: - return 'inotify' - else: - return 'stat' - -implementations = {'kqueue': KQueueMonitor, - 'inotify': InotifyMonitor, - 'stat': StatMonitor} -Monitor = implementations[ - os.environ.get('CELERYD_FSNOTIFY') or default_implementation()] - - -class Autoreloader(bgThread): - """Tracks changes in modules and fires reload commands""" - Monitor = Monitor - - def 
__init__(self, controller, modules=None, monitor_cls=None, **options): - super(Autoreloader, self).__init__() - self.controller = controller - app = self.controller.app - self.modules = app.loader.task_modules if modules is None else modules - self.options = options - self._monitor = None - self._hashes = None - self.file_to_module = {} - - def on_init(self): - files = self.file_to_module - files.update(dict( - (module_file(sys.modules[m]), m) for m in self.modules)) - - self._monitor = self.Monitor( - files, self.on_change, - shutdown_event=self._is_shutdown, **self.options) - self._hashes = dict([(f, file_hash(f)) for f in files]) - - def register_with_event_loop(self, hub): - if self._monitor is None: - self.on_init() - self._monitor.register_with_event_loop(hub) - - def on_event_loop_close(self, hub): - if self._monitor is not None: - self._monitor.on_event_loop_close(hub) - - def body(self): - self.on_init() - with ignore_errno('EINTR', 'EAGAIN'): - self._monitor.start() - - def _maybe_modified(self, f): - if os.path.exists(f): - digest = file_hash(f) - if digest != self._hashes[f]: - self._hashes[f] = digest - return True - return False - - def on_change(self, files): - modified = [f for f in files if self._maybe_modified(f)] - if modified: - names = [self.file_to_module[module] for module in modified] - logger.info('Detected modified modules: %r', names) - self._reload(names) - - def _reload(self, modules): - self.controller.reload(modules, reload=True) - - def stop(self): - if self._monitor: - self._monitor.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py deleted file mode 100644 index 265feda..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.autoscale - ~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements the internal thread responsible - for growing and shrinking the pool according to the - current autoscale settings. - - The autoscale thread is only enabled if :option:`--autoscale` - has been enabled on the command-line. - -""" -from __future__ import absolute_import - -import os -import threading - -from time import sleep - -from kombu.async.semaphore import DummyLock - -from celery import bootsteps -from celery.five import monotonic -from celery.utils.log import get_logger -from celery.utils.threads import bgThread - -from . 
import state -from .components import Pool - -__all__ = ['Autoscaler', 'WorkerComponent'] - -logger = get_logger(__name__) -debug, info, error = logger.debug, logger.info, logger.error - -AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) - - -class WorkerComponent(bootsteps.StartStopStep): - label = 'Autoscaler' - conditional = True - requires = (Pool, ) - - def __init__(self, w, **kwargs): - self.enabled = w.autoscale - w.autoscaler = None - - def create(self, w): - scaler = w.autoscaler = self.instantiate( - w.autoscaler_cls, - w.pool, w.max_concurrency, w.min_concurrency, - worker=w, mutex=DummyLock() if w.use_eventloop else None, - ) - return scaler if not w.use_eventloop else None - - def register_with_event_loop(self, w, hub): - w.consumer.on_task_message.add(w.autoscaler.maybe_scale) - hub.call_repeatedly( - w.autoscaler.keepalive, w.autoscaler.maybe_scale, - ) - - -class Autoscaler(bgThread): - - def __init__(self, pool, max_concurrency, - min_concurrency=0, worker=None, - keepalive=AUTOSCALE_KEEPALIVE, mutex=None): - super(Autoscaler, self).__init__() - self.pool = pool - self.mutex = mutex or threading.Lock() - self.max_concurrency = max_concurrency - self.min_concurrency = min_concurrency - self.keepalive = keepalive - self._last_action = None - self.worker = worker - - assert self.keepalive, 'cannot scale down too fast.' - - def body(self): - with self.mutex: - self.maybe_scale() - sleep(1.0) - - def _maybe_scale(self, req=None): - procs = self.processes - cur = min(self.qty, self.max_concurrency) - if cur > procs: - self.scale_up(cur - procs) - return True - elif cur < procs: - self.scale_down((procs - cur) - self.min_concurrency) - return True - - def maybe_scale(self, req=None): - if self._maybe_scale(req): - self.pool.maintain_pool() - - def update(self, max=None, min=None): - with self.mutex: - if max is not None: - if max < self.max_concurrency: - self._shrink(self.processes - max) - self.max_concurrency = max - if min is not None: - if min > self.min_concurrency: - self._grow(min - self.min_concurrency) - self.min_concurrency = min - return self.max_concurrency, self.min_concurrency - - def force_scale_up(self, n): - with self.mutex: - new = self.processes + n - if new > self.max_concurrency: - self.max_concurrency = new - self.min_concurrency += 1 - self._grow(n) - - def force_scale_down(self, n): - with self.mutex: - new = self.processes - n - if new < self.min_concurrency: - self.min_concurrency = max(new, 0) - self._shrink(min(n, self.processes)) - - def scale_up(self, n): - self._last_action = monotonic() - return self._grow(n) - - def scale_down(self, n): - if n and self._last_action and ( - monotonic() - self._last_action > self.keepalive): - self._last_action = monotonic() - return self._shrink(n) - - def _grow(self, n): - info('Scaling up %s processes.', n) - self.pool.grow(n) - self.worker.consumer._update_prefetch_count(n) - - def _shrink(self, n): - info('Scaling down %s processes.', n) - try: - self.pool.shrink(n) - except ValueError: - debug("Autoscaler won't scale down: all processes busy.") - except Exception as exc: - error('Autoscaler: scale_down: %r', exc, exc_info=True) - self.worker.consumer._update_prefetch_count(-n) - - def info(self): - return {'max': self.max_concurrency, - 'min': self.min_concurrency, - 'current': self.processes, - 'qty': self.qty} - - @property - def qty(self): - return len(state.reserved_requests) - - @property - def processes(self): - return self.pool.num_processes diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/worker/components.py b/thesisenv/lib/python3.6/site-packages/celery/worker/components.py deleted file mode 100644 index bb02f4e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/components.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.components - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Default worker bootsteps. - -""" -from __future__ import absolute_import - -import atexit -import warnings - -from kombu.async import Hub as _Hub, get_event_loop, set_event_loop -from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore -from kombu.async.timer import Timer as _Timer - -from celery import bootsteps -from celery._state import _set_task_join_will_block -from celery.exceptions import ImproperlyConfigured -from celery.five import string_t -from celery.utils.log import worker_logger as logger - -__all__ = ['Timer', 'Hub', 'Queues', 'Pool', 'Beat', 'StateDB', 'Consumer'] - -ERR_B_GREEN = """\ --B option doesn't work with eventlet/gevent pools: \ -use standalone beat instead.\ -""" - -W_POOL_SETTING = """ -The CELERYD_POOL setting should not be used to select the eventlet/gevent -pools, instead you *must use the -P* argument so that patches are applied -as early as possible. -""" - - -class Timer(bootsteps.Step): - """This step initializes the internal timer used by the worker.""" - - def create(self, w): - if w.use_eventloop: - # does not use dedicated timer thread. - w.timer = _Timer(max_interval=10.0) - else: - if not w.timer_cls: - # Default Timer is set by the pool, as e.g. eventlet - # needs a custom implementation. - w.timer_cls = w.pool_cls.Timer - w.timer = self.instantiate(w.timer_cls, - max_interval=w.timer_precision, - on_timer_error=self.on_timer_error, - on_timer_tick=self.on_timer_tick) - - def on_timer_error(self, exc): - logger.error('Timer error: %r', exc, exc_info=True) - - def on_timer_tick(self, delay): - logger.debug('Timer wake-up! Next eta %s secs.', delay) - - -class Hub(bootsteps.StartStopStep): - requires = (Timer, ) - - def __init__(self, w, **kwargs): - w.hub = None - - def include_if(self, w): - return w.use_eventloop - - def create(self, w): - w.hub = get_event_loop() - if w.hub is None: - w.hub = set_event_loop(_Hub(w.timer)) - self._patch_thread_primitives(w) - return self - - def start(self, w): - pass - - def stop(self, w): - w.hub.close() - - def terminate(self, w): - w.hub.close() - - def _patch_thread_primitives(self, w): - # make clock use dummy lock - w.app.clock.mutex = DummyLock() - # multiprocessing's ApplyResult uses this lock. - try: - from billiard import pool - except ImportError: - pass - else: - pool.Lock = DummyLock - - -class Queues(bootsteps.Step): - """This bootstep initializes the internal queues - used by the worker.""" - label = 'Queues (intra)' - requires = (Hub, ) - - def create(self, w): - w.process_task = w._process_task - if w.use_eventloop: - if w.pool_putlocks and w.pool_cls.uses_semaphore: - w.process_task = w._process_task_sem - - -class Pool(bootsteps.StartStopStep): - """Bootstep managing the worker pool. - - Describes how to initialize the worker pool, and starts and stops - the pool during worker startup/shutdown. 
- - Adds attributes: - - * autoscale - * pool - * max_concurrency - * min_concurrency - - """ - requires = (Queues, ) - - def __init__(self, w, autoscale=None, autoreload=None, - no_execv=False, optimization=None, **kwargs): - if isinstance(autoscale, string_t): - max_c, _, min_c = autoscale.partition(',') - autoscale = [int(max_c), min_c and int(min_c) or 0] - w.autoscale = autoscale - w.pool = None - w.max_concurrency = None - w.min_concurrency = w.concurrency - w.no_execv = no_execv - if w.autoscale: - w.max_concurrency, w.min_concurrency = w.autoscale - self.autoreload_enabled = autoreload - self.optimization = optimization - - def close(self, w): - if w.pool: - w.pool.close() - - def terminate(self, w): - if w.pool: - w.pool.terminate() - - def create(self, w, semaphore=None, max_restarts=None): - if w.app.conf.CELERYD_POOL in ('eventlet', 'gevent'): - warnings.warn(UserWarning(W_POOL_SETTING)) - threaded = not w.use_eventloop - procs = w.min_concurrency - forking_enable = w.no_execv if w.force_execv else True - if not threaded: - semaphore = w.semaphore = LaxBoundedSemaphore(procs) - w._quick_acquire = w.semaphore.acquire - w._quick_release = w.semaphore.release - max_restarts = 100 - allow_restart = self.autoreload_enabled or w.pool_restarts - pool = w.pool = self.instantiate( - w.pool_cls, w.min_concurrency, - initargs=(w.app, w.hostname), - maxtasksperchild=w.max_tasks_per_child, - timeout=w.task_time_limit, - soft_timeout=w.task_soft_time_limit, - putlocks=w.pool_putlocks and threaded, - lost_worker_timeout=w.worker_lost_wait, - threads=threaded, - max_restarts=max_restarts, - allow_restart=allow_restart, - forking_enable=forking_enable, - semaphore=semaphore, - sched_strategy=self.optimization, - ) - _set_task_join_will_block(pool.task_join_will_block) - return pool - - def info(self, w): - return {'pool': w.pool.info if w.pool else 'N/A'} - - def register_with_event_loop(self, w, hub): - w.pool.register_with_event_loop(hub) - - -class Beat(bootsteps.StartStopStep): - """Step used to embed a beat process. - - This will only be enabled if the ``beat`` - argument is set. 
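Enabling this step is the programmatic equivalent of the -B command-line option; a hedged sketch of what its create() does (EmbeddedService per the vendored celery 3.1 code; the schedule filename is an example value):

# Sketch: run the beat scheduler inside the worker process tree (celery 3.1).
from celery import Celery
from celery.beat import EmbeddedService

app = Celery('demo')
b = EmbeddedService(app, schedule_filename='celerybeat-schedule')
b.start()    # scheduler runs alongside the worker
# ... later:
b.stop()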
- - """ - label = 'Beat' - conditional = True - - def __init__(self, w, beat=False, **kwargs): - self.enabled = w.beat = beat - w.beat = None - - def create(self, w): - from celery.beat import EmbeddedService - if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): - raise ImproperlyConfigured(ERR_B_GREEN) - b = w.beat = EmbeddedService(w.app, - schedule_filename=w.schedule_filename, - scheduler_cls=w.scheduler_cls) - return b - - -class StateDB(bootsteps.Step): - """This bootstep sets up the workers state db if enabled.""" - - def __init__(self, w, **kwargs): - self.enabled = w.state_db - w._persistence = None - - def create(self, w): - w._persistence = w.state.Persistent(w.state, w.state_db, w.app.clock) - atexit.register(w._persistence.save) - - -class Consumer(bootsteps.StartStopStep): - last = True - - def create(self, w): - if w.max_concurrency: - prefetch_count = max(w.min_concurrency, 1) * w.prefetch_multiplier - else: - prefetch_count = w.concurrency * w.prefetch_multiplier - c = w.consumer = self.instantiate( - w.consumer_cls, w.process_task, - hostname=w.hostname, - send_events=w.send_events, - init_callback=w.ready_callback, - initial_prefetch_count=prefetch_count, - pool=w.pool, - timer=w.timer, - app=w.app, - controller=w, - hub=w.hub, - worker_options=w.options, - disable_rate_limits=w.disable_rate_limits, - prefetch_multiplier=w.prefetch_multiplier, - ) - return c diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py b/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py deleted file mode 100644 index cc93d6c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py +++ /dev/null @@ -1,887 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.worker.consumer -~~~~~~~~~~~~~~~~~~~~~~ - -This module contains the components responsible for consuming messages -from the broker, processing the messages and keeping the broker connections -up and running. - -""" -from __future__ import absolute_import - -import errno -import kombu -import logging -import os -import socket - -from collections import defaultdict -from functools import partial -from heapq import heappush -from operator import itemgetter -from time import sleep - -from billiard.common import restart_state -from billiard.exceptions import RestartFreqExceeded -from kombu.async.semaphore import DummyLock -from kombu.common import QoS, ignore_errors -from kombu.syn import _detect_environment -from kombu.utils.compat import get_errno -from kombu.utils.encoding import safe_repr, bytes_t -from kombu.utils.limits import TokenBucket - -from celery import chain -from celery import bootsteps -from celery.app.trace import build_tracer -from celery.canvas import signature -from celery.exceptions import InvalidTaskError -from celery.five import items, values -from celery.utils.functional import noop -from celery.utils.log import get_logger -from celery.utils.objects import Bunch -from celery.utils.text import truncate -from celery.utils.timeutils import humanize_seconds, rate - -from . import heartbeat, loops, pidbox -from .state import task_reserved, maybe_shutdown, revoked, reserved_requests - -try: - buffer_t = buffer -except NameError: # pragma: no cover - # Py3 does not have buffer, but we only need isinstance. 
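The components deleted above are all bootsteps wired into the worker blueprint, and application code can hook the same mechanism. A hedged sketch using the celery 3.1 public API (ExampleStep is a hypothetical name):

# Hedged sketch: registering a custom worker bootstep (celery 3.1 API).
from celery import Celery
from celery import bootsteps

class ExampleStep(bootsteps.StartStopStep):
    def start(self, worker):
        print('custom step starting on %s' % worker.hostname)

    def stop(self, worker):
        print('custom step stopping')

app = Celery('demo')
app.steps['worker'].add(ExampleStep)    # picked up when the blueprint applies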
- - class buffer_t(object): # noqa - pass - -__all__ = [ - 'Consumer', 'Connection', 'Events', 'Heart', 'Control', - 'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body', -] - -CLOSE = bootsteps.CLOSE -logger = get_logger(__name__) -debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, - logger.error, logger.critical) - -CONNECTION_RETRY = """\ -consumer: Connection to broker lost. \ -Trying to re-establish the connection...\ -""" - -CONNECTION_RETRY_STEP = """\ -Trying again {when}...\ -""" - -CONNECTION_ERROR = """\ -consumer: Cannot connect to %s: %s. -%s -""" - -CONNECTION_FAILOVER = """\ -Will retry using next failover.\ -""" - -UNKNOWN_FORMAT = """\ -Received and deleted unknown message. Wrong destination?!? - -The full contents of the message body was: %s -""" - -#: Error message for when an unregistered task is received. -UNKNOWN_TASK_ERROR = """\ -Received unregistered task of type %s. -The message has been ignored and discarded. - -Did you remember to import the module containing this task? -Or maybe you are using relative imports? -Please see http://bit.ly/gLye1c for more information. - -The full contents of the message body was: -%s -""" - -#: Error message for when an invalid task message is received. -INVALID_TASK_ERROR = """\ -Received invalid task message: %s -The message has been ignored and discarded. - -Please ensure your message conforms to the task -message protocol as described here: http://bit.ly/hYj41y - -The full contents of the message body was: -%s -""" - -MESSAGE_DECODE_ERROR = """\ -Can't decode message body: %r [type:%r encoding:%r headers:%s] - -body: %s -""" - -MESSAGE_REPORT = """\ -body: {0} -{{content_type:{1} content_encoding:{2} - delivery_info:{3} headers={4}}} -""" - -MINGLE_GET_FIELDS = itemgetter('clock', 'revoked') - - -def dump_body(m, body): - if isinstance(body, buffer_t): - body = bytes_t(body) - return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), - len(m.body)) - - -class Consumer(object): - Strategies = dict - - #: set when consumer is shutting down. - in_shutdown = False - - #: Optional callback called the first time the worker - #: is ready to receive tasks. - init_callback = None - - #: The current worker pool instance. - pool = None - - #: A timer used for high-priority internal tasks, such - #: as sending heartbeats. 
- timer = None - - restart_count = -1 # first start is the same as a restart - - class Blueprint(bootsteps.Blueprint): - name = 'Consumer' - default_steps = [ - 'celery.worker.consumer:Connection', - 'celery.worker.consumer:Mingle', - 'celery.worker.consumer:Events', - 'celery.worker.consumer:Gossip', - 'celery.worker.consumer:Heart', - 'celery.worker.consumer:Control', - 'celery.worker.consumer:Tasks', - 'celery.worker.consumer:Evloop', - 'celery.worker.consumer:Agent', - ] - - def shutdown(self, parent): - self.send_all(parent, 'shutdown') - - def __init__(self, on_task_request, - init_callback=noop, hostname=None, - pool=None, app=None, - timer=None, controller=None, hub=None, amqheartbeat=None, - worker_options=None, disable_rate_limits=False, - initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): - self.app = app - self.controller = controller - self.init_callback = init_callback - self.hostname = hostname or socket.gethostname() - self.pid = os.getpid() - self.pool = pool - self.timer = timer - self.strategies = self.Strategies() - conninfo = self.app.connection() - self.connection_errors = conninfo.connection_errors - self.channel_errors = conninfo.channel_errors - self._restart_state = restart_state(maxR=5, maxT=1) - - self._does_info = logger.isEnabledFor(logging.INFO) - self.on_task_request = on_task_request - self.on_task_message = set() - self.amqheartbeat_rate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE - self.disable_rate_limits = disable_rate_limits - self.initial_prefetch_count = initial_prefetch_count - self.prefetch_multiplier = prefetch_multiplier - - # this contains a tokenbucket for each task type by name, used for - # rate limits, or None if rate limits are disabled for that task. - self.task_buckets = defaultdict(lambda: None) - self.reset_rate_limits() - - self.hub = hub - if self.hub: - self.amqheartbeat = amqheartbeat - if self.amqheartbeat is None: - self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT - else: - self.amqheartbeat = 0 - - if not hasattr(self, 'loop'): - self.loop = loops.asynloop if hub else loops.synloop - - if _detect_environment() == 'gevent': - # there's a gevent bug that causes timeouts to not be reset, - # so if the connection timeout is exceeded once, it can NEVER - # connect again. - self.app.conf.BROKER_CONNECTION_TIMEOUT = None - - self.steps = [] - self.blueprint = self.Blueprint( - app=self.app, on_close=self.on_close, - ) - self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) - - def bucket_for_task(self, type): - limit = rate(getattr(type, 'rate_limit', None)) - return TokenBucket(limit, capacity=1) if limit else None - - def reset_rate_limits(self): - self.task_buckets.update( - (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks) - ) - - def _update_prefetch_count(self, index=0): - """Update prefetch count after pool/shrink grow operations. - - Index must be the change in number of processes as a positive - (increasing) or negative (decreasing) number. - - .. note:: - - Currently pool grow operations will end up with an offset - of +1 if the initial size of the pool was 0 (e.g. - ``--autoscale=1,0``). 
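bucket_for_task above builds one token bucket per task type from its rate_limit string; a sketch of the arithmetic, using the same kombu TokenBucket and the rate() helper from the vendored timeutils module deleted earlier in this diff:

# Sketch: the per-task rate-limit arithmetic used by the consumer.
from kombu.utils.limits import TokenBucket
from celery.utils.timeutils import rate    # vendored module removed above

bucket = TokenBucket(rate('10/m'), capacity=1)   # 10/60 ~= 0.167 tokens/s
if bucket.can_consume(1):
    print('dispatch the task now')
else:
    print('hold for ~%.1fs' % bucket.expected_time(1))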
- - """ - num_processes = self.pool.num_processes - if not self.initial_prefetch_count or not num_processes: - return # prefetch disabled - self.initial_prefetch_count = ( - self.pool.num_processes * self.prefetch_multiplier - ) - return self._update_qos_eventually(index) - - def _update_qos_eventually(self, index): - return (self.qos.decrement_eventually if index < 0 - else self.qos.increment_eventually)( - abs(index) * self.prefetch_multiplier) - - def _limit_task(self, request, bucket, tokens): - if not bucket.can_consume(tokens): - hold = bucket.expected_time(tokens) - self.timer.call_after( - hold, self._limit_task, (request, bucket, tokens), - ) - else: - task_reserved(request) - self.on_task_request(request) - - def start(self): - blueprint = self.blueprint - while blueprint.state != CLOSE: - self.restart_count += 1 - maybe_shutdown() - try: - blueprint.start(self) - except self.connection_errors as exc: - if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE: - raise # Too many open files - maybe_shutdown() - try: - self._restart_state.step() - except RestartFreqExceeded as exc: - crit('Frequent restarts detected: %r', exc, exc_info=1) - sleep(1) - if blueprint.state != CLOSE and self.connection: - warn(CONNECTION_RETRY, exc_info=True) - try: - self.connection.collect() - except Exception: - pass - self.on_close() - blueprint.restart(self) - - def register_with_event_loop(self, hub): - self.blueprint.send_all( - self, 'register_with_event_loop', args=(hub, ), - description='Hub.register', - ) - - def shutdown(self): - self.in_shutdown = True - self.blueprint.shutdown(self) - - def stop(self): - self.blueprint.stop(self) - - def on_ready(self): - callback, self.init_callback = self.init_callback, None - if callback: - callback(self) - - def loop_args(self): - return (self, self.connection, self.task_consumer, - self.blueprint, self.hub, self.qos, self.amqheartbeat, - self.app.clock, self.amqheartbeat_rate) - - def on_decode_error(self, message, exc): - """Callback called if an error occurs while decoding - a message received. - - Simply logs the error and acknowledges the message so it - doesn't enter a loop. - - :param message: The message with errors. - :param exc: The original exception instance. - - """ - crit(MESSAGE_DECODE_ERROR, - exc, message.content_type, message.content_encoding, - safe_repr(message.headers), dump_body(message, message.body), - exc_info=1) - message.ack() - - def on_close(self): - # Clear internal queues to get rid of old messages. - # They can't be acked anyway, as a delivery tag is specific - # to the current channel. - if self.controller and self.controller.semaphore: - self.controller.semaphore.clear() - if self.timer: - self.timer.clear() - reserved_requests.clear() - if self.pool and self.pool.flush: - self.pool.flush() - - def connect(self): - """Establish the broker connection. - - Will retry establishing the connection if the - :setting:`BROKER_CONNECTION_RETRY` setting is enabled - - """ - conn = self.app.connection(heartbeat=self.amqheartbeat) - - # Callback called for each retry while the connection - # can't be established. - def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): - if getattr(conn, 'alt', None) and interval == 0: - next_step = CONNECTION_FAILOVER - error(CONNECTION_ERROR, conn.as_uri(), exc, - next_step.format(when=humanize_seconds(interval, 'in', ' '))) - - # remember that the connection is lazy, it won't establish - # until needed. 
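As the comment above notes, kombu connections are lazy; it is ensure_connection() that actually dials the broker, retrying with a per-attempt error callback, which is the same pattern connect() uses here. A self-contained sketch (placeholder broker URL):

# Sketch of the lazy-connection + retry pattern (kombu public API).
from kombu import Connection

conn = Connection('amqp://guest@localhost//')   # placeholder URL; still lazy

def errback(exc, interval):
    print('broker unreachable (%r), retrying in %ss' % (exc, interval))

conn = conn.ensure_connection(errback, max_retries=5)   # blocks until connected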
- if not self.app.conf.BROKER_CONNECTION_RETRY: - # retry disabled, just call connect directly. - conn.connect() - return conn - - conn = conn.ensure_connection( - _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES, - callback=maybe_shutdown, - ) - if self.hub: - conn.transport.register_with_event_loop(conn.connection, self.hub) - return conn - - def add_task_queue(self, queue, exchange=None, exchange_type=None, - routing_key=None, **options): - cset = self.task_consumer - queues = self.app.amqp.queues - # Must use `in` here, as __missing__ will automatically - # create queues when CELERY_CREATE_MISSING_QUEUES is enabled. - # (Issue #1079) - if queue in queues: - q = queues[queue] - else: - exchange = queue if exchange is None else exchange - exchange_type = ('direct' if exchange_type is None - else exchange_type) - q = queues.select_add(queue, - exchange=exchange, - exchange_type=exchange_type, - routing_key=routing_key, **options) - if not cset.consuming_from(queue): - cset.add_queue(q) - cset.consume() - info('Started consuming from %s', queue) - - def cancel_task_queue(self, queue): - info('Canceling queue %s', queue) - self.app.amqp.queues.deselect(queue) - self.task_consumer.cancel_by_queue(queue) - - def apply_eta_task(self, task): - """Method called by the timer to apply a task with an - ETA/countdown.""" - task_reserved(task) - self.on_task_request(task) - self.qos.decrement_eventually() - - def _message_report(self, body, message): - return MESSAGE_REPORT.format(dump_body(message, body), - safe_repr(message.content_type), - safe_repr(message.content_encoding), - safe_repr(message.delivery_info), - safe_repr(message.headers)) - - def on_unknown_message(self, body, message): - warn(UNKNOWN_FORMAT, self._message_report(body, message)) - message.reject_log_error(logger, self.connection_errors) - - def on_unknown_task(self, body, message, exc): - error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def on_invalid_task(self, body, message, exc): - error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def update_strategies(self): - loader = self.app.loader - for name, task in items(self.app.tasks): - self.strategies[name] = task.start_strategy(self.app, self) - task.__trace__ = build_tracer(name, task, loader, self.hostname, - app=self.app) - - def create_task_handler(self): - strategies = self.strategies - on_unknown_message = self.on_unknown_message - on_unknown_task = self.on_unknown_task - on_invalid_task = self.on_invalid_task - callbacks = self.on_task_message - - def on_task_received(body, message): - headers = message.headers - try: - type_, is_proto2 = body['task'], 0 - except (KeyError, TypeError): - try: - type_, is_proto2 = headers['task'], 1 - except (KeyError, TypeError): - return on_unknown_message(body, message) - - if is_proto2: - body = proto2_to_proto1( - self.app, type_, body, message, headers) - - try: - strategies[type_](message, body, - message.ack_log_error, - message.reject_log_error, - callbacks) - except KeyError as exc: - on_unknown_task(body, message, exc) - except InvalidTaskError as exc: - on_invalid_task(body, message, exc) - - return on_task_received - - def __repr__(self): - return '<Consumer: {self.hostname} ({state})>'.format( - self=self, state=self.blueprint.human_state(), - ) - - -def proto2_to_proto1(app, type_, body, message, headers): - args, kwargs, embed = body - embedded = _extract_proto2_embed(**embed) -
chained = embedded.pop('chain') - new_body = dict( - _extract_proto2_headers(type_, **headers), - args=args, - kwargs=kwargs, - **embedded) - if chained: - new_body['callbacks'].append(chain(chained, app=app)) - return new_body - - -def _extract_proto2_headers(type_, id, retries, eta, expires, - group, timelimit, **_): - return { - 'id': id, - 'task': type_, - 'retries': retries, - 'eta': eta, - 'expires': expires, - 'utc': True, - 'taskset': group, - 'timelimit': timelimit, - } - - -def _extract_proto2_embed(callbacks, errbacks, chain, chord, **_): - return { - 'callbacks': callbacks or [], - 'errbacks': errbacks, - 'chain': chain, - 'chord': chord, - } - - -class Connection(bootsteps.StartStopStep): - - def __init__(self, c, **kwargs): - c.connection = None - - def start(self, c): - c.connection = c.connect() - info('Connected to %s', c.connection.as_uri()) - - def shutdown(self, c): - # We must set self.connection to None here, so - # that the green pidbox thread exits. - connection, c.connection = c.connection, None - if connection: - ignore_errors(connection, connection.close) - - def info(self, c, params='N/A'): - if c.connection: - params = c.connection.info() - params.pop('password', None) # don't send password. - return {'broker': params} - - -class Events(bootsteps.StartStopStep): - requires = (Connection, ) - - def __init__(self, c, send_events=None, **kwargs): - self.send_events = True - self.groups = None if send_events else ['worker'] - c.event_dispatcher = None - - def start(self, c): - # flush events sent while connection was down. - prev = self._close(c) - dis = c.event_dispatcher = c.app.events.Dispatcher( - c.connect(), hostname=c.hostname, - enabled=self.send_events, groups=self.groups, - ) - if prev: - dis.extend_buffer(prev) - dis.flush() - - def stop(self, c): - pass - - def _close(self, c): - if c.event_dispatcher: - dispatcher = c.event_dispatcher - # remember changes from remote control commands: - self.groups = dispatcher.groups - - # close custom connection - if dispatcher.connection: - ignore_errors(c, dispatcher.connection.close) - ignore_errors(c, dispatcher.close) - c.event_dispatcher = None - return dispatcher - - def shutdown(self, c): - self._close(c) - - -class Heart(bootsteps.StartStopStep): - requires = (Events, ) - - def __init__(self, c, without_heartbeat=False, heartbeat_interval=None, - **kwargs): - self.enabled = not without_heartbeat - self.heartbeat_interval = heartbeat_interval - c.heart = None - - def start(self, c): - c.heart = heartbeat.Heart( - c.timer, c.event_dispatcher, self.heartbeat_interval, - ) - c.heart.start() - - def stop(self, c): - c.heart = c.heart and c.heart.stop() - shutdown = stop - - -class Mingle(bootsteps.StartStopStep): - label = 'Mingle' - requires = (Events, ) - compatible_transports = set(['amqp', 'redis']) - - def __init__(self, c, without_mingle=False, **kwargs): - self.enabled = not without_mingle and self.compatible_transport(c.app) - - def compatible_transport(self, app): - with app.connection() as conn: - return conn.transport.driver_type in self.compatible_transports - - def start(self, c): - info('mingle: searching for neighbors') - I = c.app.control.inspect(timeout=1.0, connection=c.connection) - replies = I.hello(c.hostname, revoked._data) or {} - replies.pop(c.hostname, None) - if replies: - info('mingle: sync with %s nodes', - len([reply for reply, value in items(replies) if value])) - for reply in values(replies): - if reply: - try: - other_clock, other_revoked = MINGLE_GET_FIELDS(reply) - except 
KeyError: # reply from pre-3.1 worker - pass - else: - c.app.clock.adjust(other_clock) - revoked.update(other_revoked) - info('mingle: sync complete') - else: - info('mingle: all alone') - - -class Tasks(bootsteps.StartStopStep): - requires = (Mingle, ) - - def __init__(self, c, **kwargs): - c.task_consumer = c.qos = None - - def start(self, c): - c.update_strategies() - - # - RabbitMQ 3.3 completely redefines how basic_qos works. - # This will detect if the new qos semantics are in effect, - # and if so make sure the 'apply_global' flag is set on qos updates. - qos_global = not c.connection.qos_semantics_matches_spec - - # set initial prefetch count - c.connection.default_channel.basic_qos( - 0, c.initial_prefetch_count, qos_global, - ) - - c.task_consumer = c.app.amqp.TaskConsumer( - c.connection, on_decode_error=c.on_decode_error, - ) - - def set_prefetch_count(prefetch_count): - return c.task_consumer.qos( - prefetch_count=prefetch_count, - apply_global=qos_global, - ) - c.qos = QoS(set_prefetch_count, c.initial_prefetch_count) - - def stop(self, c): - if c.task_consumer: - debug('Canceling task consumer...') - ignore_errors(c, c.task_consumer.cancel) - - def shutdown(self, c): - if c.task_consumer: - self.stop(c) - debug('Closing consumer channel...') - ignore_errors(c, c.task_consumer.close) - c.task_consumer = None - - def info(self, c): - return {'prefetch_count': c.qos.value if c.qos else 'N/A'} - - -class Agent(bootsteps.StartStopStep): - conditional = True - requires = (Connection, ) - - def __init__(self, c, **kwargs): - self.agent_cls = self.enabled = c.app.conf.CELERYD_AGENT - - def create(self, c): - agent = c.agent = self.instantiate(self.agent_cls, c.connection) - return agent - - -class Control(bootsteps.StartStopStep): - requires = (Tasks, ) - - def __init__(self, c, **kwargs): - self.is_green = c.pool is not None and c.pool.is_green - self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) - self.start = self.box.start - self.stop = self.box.stop - self.shutdown = self.box.shutdown - - def include_if(self, c): - return c.app.conf.CELERY_ENABLE_REMOTE_CONTROL - - -class Gossip(bootsteps.ConsumerStep): - label = 'Gossip' - requires = (Mingle, ) - _cons_stamp_fields = itemgetter( - 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', - ) - compatible_transports = set(['amqp', 'redis']) - - def __init__(self, c, without_gossip=False, interval=5.0, **kwargs): - self.enabled = not without_gossip and self.compatible_transport(c.app) - self.app = c.app - c.gossip = self - self.Receiver = c.app.events.Receiver - self.hostname = c.hostname - self.full_hostname = '.'.join([self.hostname, str(c.pid)]) - self.on = Bunch( - node_join=set(), - node_leave=set(), - node_lost=set(), - ) - - self.timer = c.timer - if self.enabled: - self.state = c.app.events.State( - on_node_join=self.on_node_join, - on_node_leave=self.on_node_leave, - max_tasks_in_memory=1, - ) - if c.hub: - c._mutex = DummyLock() - self.update_state = self.state.event - self.interval = interval - self._tref = None - self.consensus_requests = defaultdict(list) - self.consensus_replies = {} - self.event_handlers = { - 'worker.elect': self.on_elect, - 'worker.elect.ack': self.on_elect_ack, - } - self.clock = c.app.clock - - self.election_handlers = { - 'task': self.call_task - } - - def compatible_transport(self, app): - with app.connection() as conn: - return conn.transport.driver_type in self.compatible_transports - - def election(self, id, topic, action=None): - self.consensus_replies[id] = [] -
self.dispatcher.send( - 'worker-elect', - id=id, topic=topic, action=action, cver=1, - ) - - def call_task(self, task): - try: - signature(task, app=self.app).apply_async() - except Exception as exc: - error('Could not call task: %r', exc, exc_info=1) - - def on_elect(self, event): - try: - (id_, clock, hostname, pid, - topic, action, _) = self._cons_stamp_fields(event) - except KeyError as exc: - return error('election request missing field %s', exc, exc_info=1) - heappush( - self.consensus_requests[id_], - (clock, '%s.%s' % (hostname, pid), topic, action), - ) - self.dispatcher.send('worker-elect-ack', id=id_) - - def start(self, c): - super(Gossip, self).start(c) - self.dispatcher = c.event_dispatcher - - def on_elect_ack(self, event): - id = event['id'] - try: - replies = self.consensus_replies[id] - except KeyError: - return # not for us - alive_workers = self.state.alive_workers() - replies.append(event['hostname']) - - if len(replies) >= len(alive_workers): - _, leader, topic, action = self.clock.sort_heap( - self.consensus_requests[id], - ) - if leader == self.full_hostname: - info('I won the election %r', id) - try: - handler = self.election_handlers[topic] - except KeyError: - error('Unknown election topic %r', topic, exc_info=1) - else: - handler(action) - else: - info('node %s elected for %r', leader, id) - self.consensus_requests.pop(id, None) - self.consensus_replies.pop(id, None) - - def on_node_join(self, worker): - debug('%s joined the party', worker.hostname) - self._call_handlers(self.on.node_join, worker) - - def on_node_leave(self, worker): - debug('%s left', worker.hostname) - self._call_handlers(self.on.node_leave, worker) - - def on_node_lost(self, worker): - info('missed heartbeat from %s', worker.hostname) - self._call_handlers(self.on.node_lost, worker) - - def _call_handlers(self, handlers, *args, **kwargs): - for handler in handlers: - try: - handler(*args, **kwargs) - except Exception as exc: - error('Ignored error from handler %r: %r', - handler, exc, exc_info=1) - - def register_timer(self): - if self._tref is not None: - self._tref.cancel() - self._tref = self.timer.call_repeatedly(self.interval, self.periodic) - - def periodic(self): - workers = self.state.workers - dirty = set() - for worker in values(workers): - if not worker.alive: - dirty.add(worker) - self.on_node_lost(worker) - for worker in dirty: - workers.pop(worker.hostname, None) - - def get_consumers(self, channel): - self.register_timer() - ev = self.Receiver(channel, routing_key='worker.#') - return [kombu.Consumer( - channel, - queues=[ev.queue], - on_message=partial(self.on_message, ev.event_from_message), - no_ack=True - )] - - def on_message(self, prepare, message): - _type = message.delivery_info['routing_key'] - - # For redis when `fanout_patterns=False` (See Issue #1882) - if _type.split('.', 1)[0] == 'task': - return - try: - handler = self.event_handlers[_type] - except KeyError: - pass - else: - return handler(message.payload) - - hostname = (message.headers.get('hostname') or - message.payload['hostname']) - if hostname != self.hostname: - type, event = prepare(message.payload) - self.update_state(event) - else: - self.clock.forward() - - -class Evloop(bootsteps.StartStopStep): - label = 'event loop' - last = True - - def start(self, c): - self.patch_all(c) - c.loop(*c.loop_args()) - - def patch_all(self, c): - c.qos._mutex = DummyLock() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/control.py b/thesisenv/lib/python3.6/site-packages/celery/worker/control.py 
deleted file mode 100644 index e8b033d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/control.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.control - ~~~~~~~~~~~~~~~~~~~~~ - - Remote control commands. - -""" -from __future__ import absolute_import - -import io -import tempfile - -from kombu.utils.encoding import safe_repr - -from celery.exceptions import WorkerShutdown -from celery.five import UserDict, items, string_t -from celery.platforms import signals as _signals -from celery.utils import timeutils -from celery.utils.functional import maybe_list -from celery.utils.log import get_logger -from celery.utils import jsonify - -from . import state as worker_state -from .state import revoked -from .job import Request - -__all__ = ['Panel'] -DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') -logger = get_logger(__name__) - - -class Panel(UserDict): - data = dict() # Global registry. - - @classmethod - def register(cls, method, name=None): - cls.data[name or method.__name__] = method - return method - - -def _find_requests_by_id(ids, requests): - found, total = 0, len(ids) - for request in requests: - if request.id in ids: - yield request - found += 1 - if found >= total: - break - - -@Panel.register -def query_task(state, ids, **kwargs): - ids = maybe_list(ids) - - def reqinfo(state, req): - return state, req.info() - - reqs = dict((req.id, ('reserved', req.info())) - for req in _find_requests_by_id( - ids, worker_state.reserved_requests)) - reqs.update(dict( - (req.id, ('active', req.info())) - for req in _find_requests_by_id( - ids, worker_state.active_requests, - ) - )) - - return reqs - - -@Panel.register -def revoke(state, task_id, terminate=False, signal=None, **kwargs): - """Revoke task by task id.""" - # supports list argument since 3.1 - task_ids, task_id = set(maybe_list(task_id) or []), None - size = len(task_ids) - terminated = set() - - revoked.update(task_ids) - if terminate: - signum = _signals.signum(signal or 'TERM') - # reserved_requests changes size during iteration - # so need to consume the items first, then terminate after. 
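A note on the revoke command whose body continues below: it is the worker-side handler behind revoke broadcasts, flagging task ids as revoked and optionally terminating jobs that are already running. A hedged client-side sketch of what triggers it (broker URL and task id are placeholders):

from celery import Celery

app = Celery('proj', broker='amqp://guest@localhost//')

# Flag the task as revoked on every worker; if one is already executing
# it, terminate the job with SIGTERM (handled by the code below).
app.control.revoke('f00dfeed-0000-0000-0000-000000000000',
                   terminate=True, signal='SIGTERM')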
- requests = set(_find_requests_by_id( - task_ids, - worker_state.reserved_requests, - )) - for request in requests: - if request.id not in terminated: - terminated.add(request.id) - logger.info('Terminating %s (%s)', request.id, signum) - request.terminate(state.consumer.pool, signal=signum) - if len(terminated) >= size: - break - - if not terminated: - return {'ok': 'terminate: tasks unknown'} - return {'ok': 'terminate: {0}'.format(', '.join(terminated))} - - idstr = ', '.join(task_ids) - logger.info('Tasks flagged as revoked: %s', idstr) - return {'ok': 'tasks {0} flagged as revoked'.format(idstr)} - - -@Panel.register -def report(state): - return {'ok': state.app.bugreport()} - - -@Panel.register -def enable_events(state): - dispatcher = state.consumer.event_dispatcher - if dispatcher.groups and 'task' not in dispatcher.groups: - dispatcher.groups.add('task') - logger.info('Events of group {task} enabled by remote.') - return {'ok': 'task events enabled'} - return {'ok': 'task events already enabled'} - - -@Panel.register -def disable_events(state): - dispatcher = state.consumer.event_dispatcher - if 'task' in dispatcher.groups: - dispatcher.groups.discard('task') - logger.info('Events of group {task} disabled by remote.') - return {'ok': 'task events disabled'} - return {'ok': 'task events already disabled'} - - -@Panel.register -def heartbeat(state): - logger.debug('Heartbeat requested by remote.') - dispatcher = state.consumer.event_dispatcher - dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) - - -@Panel.register -def rate_limit(state, task_name, rate_limit, **kwargs): - """Set new rate limit for a task type. - - See :attr:`celery.task.base.Task.rate_limit`. - - :param task_name: Type of task. - :param rate_limit: New rate limit. 
- - """ - - try: - timeutils.rate(rate_limit) - except ValueError as exc: - return {'error': 'Invalid rate limit string: {0!r}'.format(exc)} - - try: - state.app.tasks[task_name].rate_limit = rate_limit - except KeyError: - logger.error('Rate limit attempt for unknown task %s', - task_name, exc_info=True) - return {'error': 'unknown task'} - - state.consumer.reset_rate_limits() - - if not rate_limit: - logger.info('Rate limits disabled for tasks of type %s', task_name) - return {'ok': 'rate limit disabled successfully'} - - logger.info('New rate limit for tasks of type %s: %s.', - task_name, rate_limit) - return {'ok': 'new rate limit set successfully'} - - -@Panel.register -def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): - try: - task = state.app.tasks[task_name] - except KeyError: - logger.error('Change time limit attempt for unknown task %s', - task_name, exc_info=True) - return {'error': 'unknown task'} - - task.soft_time_limit = soft - task.time_limit = hard - - logger.info('New time limits for tasks of type %s: soft=%s hard=%s', - task_name, soft, hard) - return {'ok': 'time limits set successfully'} - - -@Panel.register -def dump_schedule(state, safe=False, **kwargs): - - def prepare_entries(): - for waiting in state.consumer.timer.schedule.queue: - try: - arg0 = waiting.entry.args[0] - except (IndexError, TypeError): - continue - else: - if isinstance(arg0, Request): - yield {'eta': arg0.eta.isoformat() if arg0.eta else None, - 'priority': waiting.priority, - 'request': arg0.info(safe=safe)} - return list(prepare_entries()) - - -@Panel.register -def dump_reserved(state, safe=False, **kwargs): - reserved = worker_state.reserved_requests - worker_state.active_requests - if not reserved: - return [] - return [request.info(safe=safe) for request in reserved] - - -@Panel.register -def dump_active(state, safe=False, **kwargs): - return [request.info(safe=safe) - for request in worker_state.active_requests] - - -@Panel.register -def stats(state, **kwargs): - return state.consumer.controller.stats() - - -@Panel.register -def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover - try: - import objgraph - except ImportError: - raise ImportError('Requires the objgraph library') - print('Dumping graph for type %r' % (type, )) - with tempfile.NamedTemporaryFile(prefix='cobjg', - suffix='.png', delete=False) as fh: - objects = objgraph.by_type(type)[:num] - objgraph.show_backrefs( - objects, - max_depth=max_depth, highlight=lambda v: v in objects, - filename=fh.name, - ) - return {'filename': fh.name} - - -@Panel.register -def memsample(state, **kwargs): # pragma: no cover - from celery.utils.debug import sample_mem - return sample_mem() - - -@Panel.register -def memdump(state, samples=10, **kwargs): # pragma: no cover - from celery.utils.debug import memdump - out = io.StringIO() - memdump(file=out) - return out.getvalue() - - -@Panel.register -def clock(state, **kwargs): - return {'clock': state.app.clock.value} - - -@Panel.register -def dump_revoked(state, **kwargs): - return list(worker_state.revoked) - - -@Panel.register -def hello(state, from_node, revoked=None, **kwargs): - if from_node != state.hostname: - logger.info('sync with %s', from_node) - if revoked: - worker_state.revoked.update(revoked) - return {'revoked': worker_state.revoked._data, - 'clock': state.app.clock.forward()} - - -@Panel.register -def dump_tasks(state, taskinfoitems=None, builtins=False, **kwargs): - reg = state.app.tasks - taskinfoitems = taskinfoitems or 
DEFAULT_TASK_INFO_ITEMS - - tasks = reg if builtins else ( - task for task in reg if not task.startswith('celery.')) - - def _extract_info(task): - fields = dict((field, str(getattr(task, field, None))) - for field in taskinfoitems - if getattr(task, field, None) is not None) - if fields: - info = ['='.join(f) for f in items(fields)] - return '{0} [{1}]'.format(task.name, ' '.join(info)) - return task.name - - return [_extract_info(reg[task]) for task in sorted(tasks)] - - -@Panel.register -def ping(state, **kwargs): - return {'ok': 'pong'} - - -@Panel.register -def pool_grow(state, n=1, **kwargs): - if state.consumer.controller.autoscaler: - state.consumer.controller.autoscaler.force_scale_up(n) - else: - state.consumer.pool.grow(n) - state.consumer._update_prefetch_count(n) - return {'ok': 'pool will grow'} - - -@Panel.register -def pool_shrink(state, n=1, **kwargs): - if state.consumer.controller.autoscaler: - state.consumer.controller.autoscaler.force_scale_down(n) - else: - state.consumer.pool.shrink(n) - state.consumer._update_prefetch_count(-n) - return {'ok': 'pool will shrink'} - - -@Panel.register -def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): - if state.app.conf.CELERYD_POOL_RESTARTS: - state.consumer.controller.reload(modules, reload, reloader=reloader) - return {'ok': 'reload started'} - else: - raise ValueError('Pool restarts not enabled') - - -@Panel.register -def autoscale(state, max=None, min=None): - autoscaler = state.consumer.controller.autoscaler - if autoscaler: - max_, min_ = autoscaler.update(max, min) - return {'ok': 'autoscale now max={0} min={1}'.format(max_, min_)} - raise ValueError('Autoscale not enabled') - - -@Panel.register -def shutdown(state, msg='Got shutdown from remote', **kwargs): - logger.warning(msg) - raise WorkerShutdown(msg) - - -@Panel.register -def add_consumer(state, queue, exchange=None, exchange_type=None, - routing_key=None, **options): - state.consumer.add_task_queue(queue, exchange, exchange_type, - routing_key, **options) - return {'ok': 'add consumer {0}'.format(queue)} - - -@Panel.register -def cancel_consumer(state, queue=None, **_): - state.consumer.cancel_task_queue(queue) - return {'ok': 'no longer consuming from {0}'.format(queue)} - - -@Panel.register -def active_queues(state): - """Return information about the queues a worker consumes from.""" - if state.consumer.task_consumer: - return [dict(queue.as_dict(recurse=True)) - for queue in state.consumer.task_consumer.queues] - return [] - - -def _wanted_config_key(key): - return (isinstance(key, string_t) and - key.isupper() and - not key.startswith('__')) - - -@Panel.register -def dump_conf(state, with_defaults=False, **kwargs): - return jsonify(state.app.conf.table(with_defaults=with_defaults), - keyfilter=_wanted_config_key, - unknown_type_filter=safe_repr) - - -@Panel.register -def election(state, id, topic, action=None, **kwargs): - if state.consumer.gossip: - state.consumer.gossip.election(id, topic, action) diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py deleted file mode 100644 index cf46ab0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.heartbeat - ~~~~~~~~~~~~~~~~~~~~~~~ - - This is the internal thread that sends heartbeat events - at regular intervals. 
- -""" -from __future__ import absolute_import - -from celery.utils.sysinfo import load_average - -from .state import SOFTWARE_INFO, active_requests, all_total_count - -__all__ = ['Heart'] - - -class Heart(object): - """Timer sending heartbeats at regular intervals. - - :param timer: Timer instance. - :param eventer: Event dispatcher used to send the event. - :keyword interval: Time in seconds between heartbeats. - Default is 2 seconds. - - """ - - def __init__(self, timer, eventer, interval=None): - self.timer = timer - self.eventer = eventer - self.interval = float(interval or 2.0) - self.tref = None - - # Make event dispatcher start/stop us when enabled/disabled. - self.eventer.on_enabled.add(self.start) - self.eventer.on_disabled.add(self.stop) - - def _send(self, event): - return self.eventer.send(event, freq=self.interval, - active=len(active_requests), - processed=all_total_count[0], - loadavg=load_average(), - **SOFTWARE_INFO) - - def start(self): - if self.eventer.enabled: - self._send('worker-online') - self.tref = self.timer.call_repeatedly( - self.interval, self._send, ('worker-heartbeat', ), - ) - - def stop(self): - if self.tref is not None: - self.timer.cancel(self.tref) - self.tref = None - if self.eventer.enabled: - self._send('worker-offline') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/job.py b/thesisenv/lib/python3.6/site-packages/celery/worker/job.py deleted file mode 100644 index 793de3d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/job.py +++ /dev/null @@ -1,595 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.job - ~~~~~~~~~~~~~~~~~ - - This module defines the :class:`Request` class, - which specifies how tasks are executed. - -""" -from __future__ import absolute_import, unicode_literals - -import logging -import socket -import sys - -from billiard.einfo import ExceptionInfo -from datetime import datetime -from weakref import ref - -from kombu.utils import kwdict, reprcall -from kombu.utils.encoding import safe_repr, safe_str - -from celery import signals -from celery.app.trace import trace_task, trace_task_ret -from celery.exceptions import ( - Ignore, TaskRevokedError, InvalidTaskError, - SoftTimeLimitExceeded, TimeLimitExceeded, - WorkerLostError, Terminated, Retry, Reject, -) -from celery.five import items, monotonic, string, string_t -from celery.platforms import signals as _signals -from celery.utils import fun_takes_kwargs -from celery.utils.functional import noop -from celery.utils.log import get_logger -from celery.utils.serialization import get_pickled_exception -from celery.utils.text import truncate -from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware - -from . 
import state - -__all__ = ['Request'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -logger = get_logger(__name__) -debug, info, warn, error = (logger.debug, logger.info, - logger.warning, logger.error) -_does_info = False -_does_debug = False - -#: Max length of result representation -RESULT_MAXLEN = 128 - - -def __optimize__(): - # this is also called by celery.app.trace.setup_worker_optimizations - global _does_debug - global _does_info - _does_debug = logger.isEnabledFor(logging.DEBUG) - _does_info = logger.isEnabledFor(logging.INFO) -__optimize__() - -# Localize -tz_utc = timezone.utc -tz_or_local = timezone.tz_or_local -send_revoked = signals.task_revoked.send - -task_accepted = state.task_accepted -task_ready = state.task_ready -revoked_tasks = state.revoked - -NEEDS_KWDICT = sys.version_info <= (2, 6) - -#: Use when no message object passed to :class:`Request`. -DEFAULT_FIELDS = { - 'headers': None, - 'reply_to': None, - 'correlation_id': None, - 'delivery_info': { - 'exchange': None, - 'routing_key': None, - 'priority': 0, - 'redelivered': False, - }, -} - - -class Request(object): - """A request for task execution.""" - if not IS_PYPY: # pragma: no cover - __slots__ = ( - 'app', 'name', 'id', 'args', 'kwargs', 'on_ack', - 'hostname', 'eventer', 'connection_errors', 'task', 'eta', - 'expires', 'request_dict', 'acknowledged', 'on_reject', - 'utc', 'time_start', 'worker_pid', '_already_revoked', - '_terminate_on_ack', '_apply_result', - '_tzlocal', '__weakref__', '__dict__', - ) - - #: Format string used to log task success. - success_msg = """\ - Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s - """ - - #: Format string used to log task failure. - error_msg = """\ - Task %(name)s[%(id)s] %(description)s: %(exc)s - """ - - #: Format string used to log internal error. - internal_error_msg = """\ - Task %(name)s[%(id)s] %(description)s: %(exc)s - """ - - ignored_msg = """\ - Task %(name)s[%(id)s] %(description)s - """ - - rejected_msg = """\ - Task %(name)s[%(id)s] %(exc)s - """ - - #: Format string used to log task retry. - retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s""" - - def __init__(self, body, on_ack=noop, - hostname=None, eventer=None, app=None, - connection_errors=None, request_dict=None, - message=None, task=None, on_reject=noop, **opts): - self.app = app - name = self.name = body['task'] - self.id = body['id'] - self.args = body.get('args', []) - self.kwargs = body.get('kwargs', {}) - try: - self.kwargs.items - except AttributeError: - raise InvalidTaskError( - 'Task keyword arguments is not a mapping') - if NEEDS_KWDICT: - self.kwargs = kwdict(self.kwargs) - eta = body.get('eta') - expires = body.get('expires') - utc = self.utc = body.get('utc', False) - self.on_ack = on_ack - self.on_reject = on_reject - self.hostname = hostname or socket.gethostname() - self.eventer = eventer - self.connection_errors = connection_errors or () - self.task = task or self.app.tasks[name] - self.acknowledged = self._already_revoked = False - self.time_start = self.worker_pid = self._terminate_on_ack = None - self._apply_result = None - self._tzlocal = None - - # timezone means the message is timezone-aware, and the only timezone - # supported at this point is UTC. 
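The ETA/expires handling that follows parses ISO-8601 strings from the message into datetimes, attaching tzinfo when the message is UTC-aware. A short sketch of the same helpers, assuming a utc=True message (the timestamp is illustrative):

from celery.utils.timeutils import maybe_iso8601, maybe_make_aware, timezone

eta = maybe_iso8601('2018-10-30T12:23:00')  # wire format -> naive datetime
eta = maybe_make_aware(eta, timezone.utc)   # attach tzinfo for utc=True messages
print(eta.isoformat())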
- if eta is not None: - try: - self.eta = maybe_iso8601(eta) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid eta value {0!r}: {1}'.format(eta, exc)) - if utc: - self.eta = maybe_make_aware(self.eta, self.tzlocal) - else: - self.eta = None - if expires is not None: - try: - self.expires = maybe_iso8601(expires) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid expires value {0!r}: {1}'.format(expires, exc)) - if utc: - self.expires = maybe_make_aware(self.expires, self.tzlocal) - else: - self.expires = None - - if message: - delivery_info = message.delivery_info or {} - properties = message.properties or {} - body.update({ - 'headers': message.headers, - 'reply_to': properties.get('reply_to'), - 'correlation_id': properties.get('correlation_id'), - 'delivery_info': { - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key'), - 'priority': properties.get( - 'priority', delivery_info.get('priority')), - 'redelivered': delivery_info.get('redelivered'), - } - - }) - else: - body.update(DEFAULT_FIELDS) - self.request_dict = body - - @property - def delivery_info(self): - return self.request_dict['delivery_info'] - - def extend_with_default_kwargs(self): - """Extend the tasks keyword arguments with standard task arguments. - - Currently these are `logfile`, `loglevel`, `task_id`, - `task_name`, `task_retries`, and `delivery_info`. - - See :meth:`celery.task.base.Task.run` for more information. - - Magic keyword arguments are deprecated and will be removed - in version 4.0. - - """ - kwargs = dict(self.kwargs) - default_kwargs = {'logfile': None, # deprecated - 'loglevel': None, # deprecated - 'task_id': self.id, - 'task_name': self.name, - 'task_retries': self.request_dict.get('retries', 0), - 'task_is_eager': False, - 'delivery_info': self.delivery_info} - fun = self.task.run - supported_keys = fun_takes_kwargs(fun, default_kwargs) - extend_with = dict((key, val) for key, val in items(default_kwargs) - if key in supported_keys) - kwargs.update(extend_with) - return kwargs - - def execute_using_pool(self, pool, **kwargs): - """Used by the worker to send this task to the pool. - - :param pool: A :class:`celery.concurrency.base.TaskPool` instance. - - :raises celery.exceptions.TaskRevokedError: if the task was revoked - and ignored. - - """ - uuid = self.id - task = self.task - if self.revoked(): - raise TaskRevokedError(uuid) - - hostname = self.hostname - kwargs = self.kwargs - if task.accept_magic_kwargs: - kwargs = self.extend_with_default_kwargs() - request = self.request_dict - request.update({'hostname': hostname, 'is_eager': False, - 'delivery_info': self.delivery_info, - 'group': self.request_dict.get('taskset')}) - timeout, soft_timeout = request.get('timelimit', (None, None)) - timeout = timeout or task.time_limit - soft_timeout = soft_timeout or task.soft_time_limit - result = pool.apply_async( - trace_task_ret, - args=(self.name, uuid, self.args, kwargs, request), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_timeout, - timeout=timeout, - correlation_id=uuid, - ) - # cannot create weakref to None - self._apply_result = ref(result) if result is not None else result - return result - - def execute(self, loglevel=None, logfile=None): - """Execute the task in a :func:`~celery.app.trace.trace_task`. - - :keyword loglevel: The loglevel used by the task. 
- :keyword logfile: The logfile used by the task. - - """ - if self.revoked(): - return - - # acknowledge task as being processed. - if not self.task.acks_late: - self.acknowledge() - - kwargs = self.kwargs - if self.task.accept_magic_kwargs: - kwargs = self.extend_with_default_kwargs() - request = self.request_dict - request.update({'loglevel': loglevel, 'logfile': logfile, - 'hostname': self.hostname, 'is_eager': False, - 'delivery_info': self.delivery_info}) - retval = trace_task(self.task, self.id, self.args, kwargs, request, - hostname=self.hostname, loader=self.app.loader, - app=self.app) - self.acknowledge() - return retval - - def maybe_expire(self): - """If expired, mark the task as revoked.""" - if self.expires: - now = datetime.now(self.expires.tzinfo) - if now > self.expires: - revoked_tasks.add(self.id) - return True - - def terminate(self, pool, signal=None): - signal = _signals.signum(signal or 'TERM') - if self.time_start: - pool.terminate_job(self.worker_pid, signal) - self._announce_revoked('terminated', True, signal, False) - else: - self._terminate_on_ack = pool, signal - if self._apply_result is not None: - obj = self._apply_result() # is a weakref - if obj is not None: - obj.terminate(signal) - - def _announce_revoked(self, reason, terminated, signum, expired): - task_ready(self) - self.send_event('task-revoked', - terminated=terminated, signum=signum, expired=expired) - if self.store_errors: - self.task.backend.mark_as_revoked(self.id, reason, request=self) - self.acknowledge() - self._already_revoked = True - send_revoked(self.task, request=self, - terminated=terminated, signum=signum, expired=expired) - - def revoked(self): - """If revoked, skip task and mark state.""" - expired = False - if self._already_revoked: - return True - if self.expires: - expired = self.maybe_expire() - if self.id in revoked_tasks: - info('Discarding revoked task: %s[%s]', self.name, self.id) - self._announce_revoked( - 'expired' if expired else 'revoked', False, None, expired, - ) - return True - return False - - def send_event(self, type, **fields): - if self.eventer and self.eventer.enabled and self.task.send_events: - self.eventer.send(type, uuid=self.id, **fields) - - def on_accepted(self, pid, time_accepted): - """Handler called when task is accepted by worker pool.""" - self.worker_pid = pid - self.time_start = time_accepted - task_accepted(self) - if not self.task.acks_late: - self.acknowledge() - self.send_event('task-started') - if _does_debug: - debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) - if self._terminate_on_ack is not None: - self.terminate(*self._terminate_on_ack) - - def on_timeout(self, soft, timeout): - """Handler called if the task times out.""" - task_ready(self) - if soft: - warn('Soft time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = SoftTimeLimitExceeded(timeout) - else: - error('Hard time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = TimeLimitExceeded(timeout) - - if self.store_errors: - self.task.backend.mark_as_failure(self.id, exc, request=self) - - if self.task.acks_late: - self.acknowledge() - - def on_success(self, ret_value, now=None, nowfun=monotonic): - """Handler called if the task was successfully processed.""" - if isinstance(ret_value, ExceptionInfo): - if isinstance(ret_value.exception, ( - SystemExit, KeyboardInterrupt)): - raise ret_value.exception - return self.on_failure(ret_value) - task_ready(self) - - if self.task.acks_late: - self.acknowledge() - - if self.eventer and 
self.eventer.enabled: - now = nowfun() - runtime = self.time_start and (now - self.time_start) or 0 - self.send_event('task-succeeded', - result=safe_repr(ret_value), runtime=runtime) - - if _does_info: - now = now or nowfun() - runtime = self.time_start and (now - self.time_start) or 0 - info(self.success_msg.strip(), { - 'id': self.id, 'name': self.name, - 'return_value': self.repr_result(ret_value), - 'runtime': runtime}) - - def on_retry(self, exc_info): - """Handler called if the task should be retried.""" - if self.task.acks_late: - self.acknowledge() - - self.send_event('task-retried', - exception=safe_repr(exc_info.exception.exc), - traceback=safe_str(exc_info.traceback)) - - if _does_info: - info(self.retry_msg.strip(), - {'id': self.id, 'name': self.name, - 'exc': exc_info.exception}) - - def on_failure(self, exc_info): - """Handler called if the task raised an exception.""" - task_ready(self) - send_failed_event = True - - if not exc_info.internal: - exc = exc_info.exception - - if isinstance(exc, Retry): - return self.on_retry(exc_info) - - # These are special cases where the process would not have had - # time to write the result. - if self.store_errors: - if isinstance(exc, WorkerLostError): - self.task.backend.mark_as_failure( - self.id, exc, request=self, - ) - elif isinstance(exc, Terminated): - self._announce_revoked( - 'terminated', True, string(exc), False) - send_failed_event = False # already sent revoked event - # (acks_late) acknowledge after result stored. - if self.task.acks_late: - self.acknowledge() - self._log_error(exc_info, send_failed_event=send_failed_event) - - def _log_error(self, einfo, send_failed_event=True): - einfo.exception = get_pickled_exception(einfo.exception) - eobj = einfo.exception - exception, traceback, exc_info, internal, sargs, skwargs = ( - safe_repr(eobj), - safe_str(einfo.traceback), - einfo.exc_info, - einfo.internal, - safe_repr(self.args), - safe_repr(self.kwargs), - ) - task = self.task - if task.throws and isinstance(eobj, task.throws): - do_send_mail, severity, exc_info, description = ( - False, logging.INFO, None, 'raised expected', - ) - else: - do_send_mail, severity, description = ( - True, logging.ERROR, 'raised unexpected', - ) - - format = self.error_msg - if internal: - if isinstance(einfo.exception, MemoryError): - raise MemoryError('Process got: %s' % (einfo.exception, )) - elif isinstance(einfo.exception, Reject): - format = self.rejected_msg - description = 'rejected' - severity = logging.WARN - send_failed_event = False - self.reject(requeue=einfo.exception.requeue) - elif isinstance(einfo.exception, Ignore): - format = self.ignored_msg - description = 'ignored' - severity = logging.INFO - exc_info = None - send_failed_event = False - self.acknowledge() - else: - format = self.internal_error_msg - description = 'INTERNAL ERROR' - severity = logging.CRITICAL - - if send_failed_event: - self.send_event( - 'task-failed', exception=exception, traceback=traceback, - ) - - context = { - 'hostname': self.hostname, - 'id': self.id, - 'name': self.name, - 'exc': exception, - 'traceback': traceback, - 'args': sargs, - 'kwargs': skwargs, - 'description': description, - } - - logger.log(severity, format.strip(), context, - exc_info=exc_info, - extra={'data': {'id': self.id, - 'name': self.name, - 'args': sargs, - 'kwargs': skwargs, - 'hostname': self.hostname, - 'internal': internal}}) - - if do_send_mail: - task.send_error_email(context, einfo.exception) - - def acknowledge(self): - """Acknowledge task.""" - if not 
self.acknowledged: - self.on_ack(logger, self.connection_errors) - self.acknowledged = True - - def reject(self, requeue=False): - if not self.acknowledged: - self.on_reject(logger, self.connection_errors, requeue) - self.acknowledged = True - - def repr_result(self, result, maxlen=RESULT_MAXLEN): - # 46 is the length needed to fit - # 'the quick brown fox jumps over the lazy dog' :) - if not isinstance(result, string_t): - result = safe_repr(result) - return truncate(result) if len(result) > maxlen else result - - def info(self, safe=False): - return {'id': self.id, - 'name': self.name, - 'args': self.args if safe else safe_repr(self.args), - 'kwargs': self.kwargs if safe else safe_repr(self.kwargs), - 'hostname': self.hostname, - 'time_start': self.time_start, - 'acknowledged': self.acknowledged, - 'delivery_info': self.delivery_info, - 'worker_pid': self.worker_pid} - - def __str__(self): - return '{0.name}[{0.id}]{1}{2}'.format( - self, - ' eta:[{0}]'.format(self.eta) if self.eta else '', - ' expires:[{0}]'.format(self.expires) if self.expires else '', - ) - shortinfo = __str__ - - def __repr__(self): - return '<{0} {1}: {2}>'.format( - type(self).__name__, self.id, - reprcall(self.name, self.args, self.kwargs)) - - @property - def tzlocal(self): - if self._tzlocal is None: - self._tzlocal = self.app.conf.CELERY_TIMEZONE - return self._tzlocal - - @property - def store_errors(self): - return (not self.task.ignore_result or - self.task.store_errors_even_if_ignored) - - @property - def task_id(self): - # XXX compat - return self.id - - @task_id.setter # noqa - def task_id(self, value): - self.id = value - - @property - def task_name(self): - # XXX compat - return self.name - - @task_name.setter # noqa - def task_name(self, value): - self.name = value - - @property - def reply_to(self): - # used by rpc backend when failures reported by parent process - return self.request_dict['reply_to'] - - @property - def correlation_id(self): - # used similarly to reply_to - return self.request_dict['correlation_id'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py b/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py deleted file mode 100644 index 8b006a8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -celery.worker.loop -~~~~~~~~~~~~~~~~~~ - -The consumers highly-optimized inner loop. - -""" -from __future__ import absolute_import - -import socket - -from celery.bootsteps import RUN -from celery.exceptions import WorkerShutdown, WorkerTerminate, WorkerLostError -from celery.utils.log import get_logger - -from . 
import state - -__all__ = ['asynloop', 'synloop'] - -logger = get_logger(__name__) -error = logger.error - - -def asynloop(obj, connection, consumer, blueprint, hub, qos, - heartbeat, clock, hbrate=2.0, RUN=RUN): - """Non-blocking event loop consuming messages until connection is lost, - or shutdown is requested.""" - update_qos = qos.update - hbtick = connection.heartbeat_check - errors = connection.connection_errors - heartbeat = connection.get_heartbeat_interval() # negotiated - - on_task_received = obj.create_task_handler() - - if heartbeat and connection.supports_heartbeats: - hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate) - - consumer.callbacks = [on_task_received] - consumer.consume() - obj.on_ready() - obj.controller.register_with_event_loop(hub) - obj.register_with_event_loop(hub) - - # did_start_ok will verify that pool processes were able to start, - # but this will only work the first time we start, as - # maxtasksperchild will mess up metrics. - if not obj.restart_count and not obj.pool.did_start_ok(): - raise WorkerLostError('Could not start worker processes') - - # consumer.consume() may have prefetched up to our - # limit - drain an event so we are in a clean state - # prior to starting our event loop. - if connection.transport.driver_type == 'amqp': - hub.call_soon(connection.drain_events) - - # FIXME: Use loop.run_forever - # Tried and works, but no time to test properly before release. - hub.propagate_errors = errors - loop = hub.create_loop() - - try: - while blueprint.state == RUN and obj.connection: - # shutdown if signal handlers told us to. - if state.should_stop: - raise WorkerShutdown() - elif state.should_terminate: - raise WorkerTerminate() - - # We only update QoS when there are no more messages to read. - # This groups together qos calls, and makes sure that remote - # control commands will be prioritized over task messages. - if qos.prev != qos.value: - update_qos() - - try: - next(loop) - except StopIteration: - loop = hub.create_loop() - finally: - try: - hub.reset() - except Exception as exc: - error( - 'Error cleaning up after event loop: %r', exc, exc_info=1, - ) - - -def synloop(obj, connection, consumer, blueprint, hub, qos, - heartbeat, clock, hbrate=2.0, **kwargs): - """Fallback blocking event loop for transports that don't support AIO.""" - - on_task_received = obj.create_task_handler() - consumer.register_callback(on_task_received) - consumer.consume() - - obj.on_ready() - - while blueprint.state == RUN and obj.connection: - state.maybe_shutdown() - if qos.prev != qos.value: - qos.update() - try: - connection.drain_events(timeout=2.0) - except socket.timeout: - pass - except socket.error: - if blueprint.state == RUN: - raise diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py b/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py deleted file mode 100644 index 058edd4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import absolute_import - -import socket -import threading - -from kombu.common import ignore_errors -from kombu.utils.encoding import safe_str - -from celery.datastructures import AttributeDict -from celery.utils.log import get_logger - -from . 
import control - -__all__ = ['Pidbox', 'gPidbox'] - -logger = get_logger(__name__) -debug, error, info = logger.debug, logger.error, logger.info - - -class Pidbox(object): - consumer = None - - def __init__(self, c): - self.c = c - self.hostname = c.hostname - self.node = c.app.control.mailbox.Node( - safe_str(c.hostname), - handlers=control.Panel.data, - state=AttributeDict(app=c.app, hostname=c.hostname, consumer=c), - ) - self._forward_clock = self.c.app.clock.forward - - def on_message(self, body, message): - # just increase clock as clients usually don't - # have a valid clock to adjust with. - self._forward_clock() - try: - self.node.handle_message(body, message) - except KeyError as exc: - error('No such control command: %s', exc) - except Exception as exc: - error('Control command error: %r', exc, exc_info=True) - self.reset() - - def start(self, c): - self.node.channel = c.connection.channel() - self.consumer = self.node.listen(callback=self.on_message) - self.consumer.on_decode_error = c.on_decode_error - - def on_stop(self): - pass - - def stop(self, c): - self.on_stop() - self.consumer = self._close_channel(c) - - def reset(self): - """Sets up the process mailbox.""" - self.stop(self.c) - self.start(self.c) - - def _close_channel(self, c): - if self.node and self.node.channel: - ignore_errors(c, self.node.channel.close) - - def shutdown(self, c): - self.on_stop() - if self.consumer: - debug('Canceling broadcast consumer...') - ignore_errors(c, self.consumer.cancel) - self.stop(self.c) - - -class gPidbox(Pidbox): - _node_shutdown = None - _node_stopped = None - _resets = 0 - - def start(self, c): - c.pool.spawn_n(self.loop, c) - - def on_stop(self): - if self._node_stopped: - self._node_shutdown.set() - debug('Waiting for broadcast thread to shutdown...') - self._node_stopped.wait() - self._node_stopped = self._node_shutdown = None - - def reset(self): - self._resets += 1 - - def _do_reset(self, c, connection): - self._close_channel(c) - self.node.channel = connection.channel() - self.consumer = self.node.listen(callback=self.on_message) - self.consumer.consume() - - def loop(self, c): - resets = [self._resets] - shutdown = self._node_shutdown = threading.Event() - stopped = self._node_stopped = threading.Event() - try: - with c.connect() as connection: - - info('pidbox: Connected to %s.', connection.as_uri()) - self._do_reset(c, connection) - while not shutdown.is_set() and c.connection: - if resets[0] < self._resets: - resets[0] += 1 - self._do_reset(c, connection) - try: - connection.drain_events(timeout=1.0) - except socket.timeout: - pass - finally: - stopped.set() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/request.py b/thesisenv/lib/python3.6/site-packages/celery/worker/request.py deleted file mode 100644 index 8a65701..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/request.py +++ /dev/null @@ -1,536 +0,0 @@ -# -*- coding: utf-8 -*- -"""This module defines the :class:`Request` class, that specifies -how tasks are executed.""" -from __future__ import absolute_import, unicode_literals - -import logging -import sys - -from datetime import datetime -from weakref import ref - -from billiard.common import TERM_SIGNAME -from kombu.utils.encoding import safe_repr, safe_str -from kombu.utils.objects import cached_property - -from celery import signals -from celery.app.trace import trace_task, trace_task_ret -from celery.exceptions import ( - Ignore, TaskRevokedError, InvalidTaskError, - SoftTimeLimitExceeded, TimeLimitExceeded, - 
WorkerLostError, Terminated, Retry, Reject, -) -from celery.five import python_2_unicode_compatible, string -from celery.platforms import signals as _signals -from celery.utils.functional import maybe, noop -from celery.utils.log import get_logger -from celery.utils.nodenames import gethostname -from celery.utils.time import maybe_iso8601, timezone, maybe_make_aware -from celery.utils.serialization import get_pickled_exception - -from . import state - -__all__ = ['Request'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -logger = get_logger(__name__) -debug, info, warn, error = (logger.debug, logger.info, - logger.warning, logger.error) -_does_info = False -_does_debug = False - - -def __optimize__(): - # this is also called by celery.app.trace.setup_worker_optimizations - global _does_debug - global _does_info - _does_debug = logger.isEnabledFor(logging.DEBUG) - _does_info = logger.isEnabledFor(logging.INFO) -__optimize__() - -# Localize -tz_or_local = timezone.tz_or_local -send_revoked = signals.task_revoked.send - -task_accepted = state.task_accepted -task_ready = state.task_ready -revoked_tasks = state.revoked - - -@python_2_unicode_compatible -class Request(object): - """A request for task execution.""" - acknowledged = False - time_start = None - worker_pid = None - time_limits = (None, None) - _already_revoked = False - _terminate_on_ack = None - _apply_result = None - _tzlocal = None - - if not IS_PYPY: # pragma: no cover - __slots__ = ( - 'app', 'type', 'name', 'id', 'root_id', 'parent_id', - 'on_ack', 'body', 'hostname', 'eventer', 'connection_errors', - 'task', 'eta', 'expires', 'request_dict', 'on_reject', 'utc', - 'content_type', 'content_encoding', 'argsrepr', 'kwargsrepr', - '_decoded', - '__weakref__', '__dict__', - ) - - def __init__(self, message, on_ack=noop, - hostname=None, eventer=None, app=None, - connection_errors=None, request_dict=None, - task=None, on_reject=noop, body=None, - headers=None, decoded=False, utc=True, - maybe_make_aware=maybe_make_aware, - maybe_iso8601=maybe_iso8601, **opts): - if headers is None: - headers = message.headers - if body is None: - body = message.body - self.app = app - self.message = message - self.body = body - self.utc = utc - self._decoded = decoded - if decoded: - self.content_type = self.content_encoding = None - else: - self.content_type, self.content_encoding = ( - message.content_type, message.content_encoding, - ) - - self.id = headers['id'] - type = self.type = self.name = headers['task'] - self.root_id = headers.get('root_id') - self.parent_id = headers.get('parent_id') - if 'shadow' in headers: - self.name = headers['shadow'] or self.name - if 'timelimit' in headers: - self.time_limits = headers['timelimit'] - self.argsrepr = headers.get('argsrepr', '') - self.kwargsrepr = headers.get('kwargsrepr', '') - self.on_ack = on_ack - self.on_reject = on_reject - self.hostname = hostname or gethostname() - self.eventer = eventer - self.connection_errors = connection_errors or () - self.task = task or self.app.tasks[type] - - # timezone means the message is timezone-aware, and the only timezone - # supported at this point is UTC. 
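As in the older job.py Request, the constructor logic that continues below reads its scheduling and bookkeeping fields from the message headers rather than the body (task protocol 2). For reference, a sketch of such a headers mapping; every value is an illustrative placeholder, not taken from this diff:

# Hypothetical protocol-2 headers as seen by Request.__init__:
headers = {
    'id': 'f00dfeed-0000-0000-0000-000000000000',  # task id
    'task': 'proj.tasks.add',                      # registered task name
    'root_id': None, 'parent_id': None,            # workflow lineage
    'shadow': None,                                # optional name override
    'timelimit': (60, 30),                         # (hard, soft) seconds
    'argsrepr': '(2, 2)',                          # textual repr only
    'kwargsrepr': '{}',
    'eta': None,                                   # ISO-8601 string if scheduled
    'expires': None,
}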
- eta = headers.get('eta') - if eta is not None: - try: - eta = maybe_iso8601(eta) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid ETA value {0!r}: {1}'.format(eta, exc)) - self.eta = maybe_make_aware(eta, self.tzlocal) - else: - self.eta = None - - expires = headers.get('expires') - if expires is not None: - try: - expires = maybe_iso8601(expires) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid expires value {0!r}: {1}'.format(expires, exc)) - self.expires = maybe_make_aware(expires, self.tzlocal) - else: - self.expires = None - - delivery_info = message.delivery_info or {} - properties = message.properties or {} - headers.update({ - 'reply_to': properties.get('reply_to'), - 'correlation_id': properties.get('correlation_id'), - 'delivery_info': { - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key'), - 'priority': properties.get('priority'), - 'redelivered': delivery_info.get('redelivered'), - } - - }) - self.request_dict = headers - - @property - def delivery_info(self): - return self.request_dict['delivery_info'] - - def execute_using_pool(self, pool, **kwargs): - """Used by the worker to send this task to the pool. - - Arguments: - pool (~celery.concurrency.base.TaskPool): The execution pool - used to execute this request. - - Raises: - celery.exceptions.TaskRevokedError: if the task was revoked. - """ - task_id = self.id - task = self.task - if self.revoked(): - raise TaskRevokedError(task_id) - - time_limit, soft_time_limit = self.time_limits - result = pool.apply_async( - trace_task_ret, - args=(self.type, task_id, self.request_dict, self.body, - self.content_type, self.content_encoding), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_time_limit or task.soft_time_limit, - timeout=time_limit or task.time_limit, - correlation_id=task_id, - ) - # cannot create weakref to None - self._apply_result = maybe(ref, result) - return result - - def execute(self, loglevel=None, logfile=None): - """Execute the task in a :func:`~celery.app.trace.trace_task`. - - Arguments: - loglevel (int): The loglevel used by the task. - logfile (str): The logfile used by the task. - """ - if self.revoked(): - return - - # acknowledge task as being processed. 
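The acks_late branching that continues below is driven by a task-level option: by default the message is acknowledged before execution; with acks_late, only after the result is stored. A hedged sketch of opting in (app name and broker are placeholders; reject_on_worker_lost is the companion option referenced further down in on_failure):

from celery import Celery

app = Celery('proj', broker='amqp://guest@localhost//')

@app.task(acks_late=True, reject_on_worker_lost=True)
def process(item):
    # Acked only after the return value is stored; if the worker dies
    # mid-task, the message is rejected/requeued instead of being lost.
    return item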
- if not self.task.acks_late: - self.acknowledge() - - request = self.request_dict - args, kwargs, embed = self._payload - request.update({'loglevel': loglevel, 'logfile': logfile, - 'hostname': self.hostname, 'is_eager': False, - 'args': args, 'kwargs': kwargs}, **embed or {}) - retval = trace_task(self.task, self.id, args, kwargs, request, - hostname=self.hostname, loader=self.app.loader, - app=self.app)[0] - self.acknowledge() - return retval - - def maybe_expire(self): - """If expired, mark the task as revoked.""" - if self.expires: - now = datetime.now(self.expires.tzinfo) - if now > self.expires: - revoked_tasks.add(self.id) - return True - - def terminate(self, pool, signal=None): - signal = _signals.signum(signal or TERM_SIGNAME) - if self.time_start: - pool.terminate_job(self.worker_pid, signal) - self._announce_revoked('terminated', True, signal, False) - else: - self._terminate_on_ack = pool, signal - if self._apply_result is not None: - obj = self._apply_result() # is a weakref - if obj is not None: - obj.terminate(signal) - - def _announce_revoked(self, reason, terminated, signum, expired): - task_ready(self) - self.send_event('task-revoked', - terminated=terminated, signum=signum, expired=expired) - self.task.backend.mark_as_revoked( - self.id, reason, request=self, store_result=self.store_errors, - ) - self.acknowledge() - self._already_revoked = True - send_revoked(self.task, request=self, - terminated=terminated, signum=signum, expired=expired) - - def revoked(self): - """If revoked, skip task and mark state.""" - expired = False - if self._already_revoked: - return True - if self.expires: - expired = self.maybe_expire() - if self.id in revoked_tasks: - info('Discarding revoked task: %s[%s]', self.name, self.id) - self._announce_revoked( - 'expired' if expired else 'revoked', False, None, expired, - ) - return True - return False - - def send_event(self, type, **fields): - if self.eventer and self.eventer.enabled and self.task.send_events: - self.eventer.send(type, uuid=self.id, **fields) - - def on_accepted(self, pid, time_accepted): - """Handler called when task is accepted by worker pool.""" - self.worker_pid = pid - self.time_start = time_accepted - task_accepted(self) - if not self.task.acks_late: - self.acknowledge() - self.send_event('task-started') - if _does_debug: - debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) - if self._terminate_on_ack is not None: - self.terminate(*self._terminate_on_ack) - - def on_timeout(self, soft, timeout): - """Handler called if the task times out.""" - task_ready(self) - if soft: - warn('Soft time limit (%ss) exceeded for %s[%s]', - soft, self.name, self.id) - exc = SoftTimeLimitExceeded(soft) - else: - error('Hard time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = TimeLimitExceeded(timeout) - - self.task.backend.mark_as_failure( - self.id, exc, request=self, store_result=self.store_errors, - ) - - if self.task.acks_late: - self.acknowledge() - - def on_success(self, failed__retval__runtime, **kwargs): - """Handler called if the task was successfully processed.""" - failed, retval, runtime = failed__retval__runtime - if failed: - if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)): - raise retval.exception - return self.on_failure(retval, return_ok=True) - task_ready(self) - - if self.task.acks_late: - self.acknowledge() - - self.send_event('task-succeeded', result=retval, runtime=runtime) - - def on_retry(self, exc_info): - """Handler called if the task should be retried.""" - 
if self.task.acks_late: - self.acknowledge() - - self.send_event('task-retried', - exception=safe_repr(exc_info.exception.exc), - traceback=safe_str(exc_info.traceback)) - - def on_failure(self, exc_info, send_failed_event=True, return_ok=False): - """Handler called if the task raised an exception.""" - task_ready(self) - if isinstance(exc_info.exception, MemoryError): - raise MemoryError('Process got: %s' % (exc_info.exception,)) - elif isinstance(exc_info.exception, Reject): - return self.reject(requeue=exc_info.exception.requeue) - elif isinstance(exc_info.exception, Ignore): - return self.acknowledge() - - exc = exc_info.exception - - if isinstance(exc, Retry): - return self.on_retry(exc_info) - - # These are special cases where the process wouldn't've had - # time to write the result. - if isinstance(exc, Terminated): - self._announce_revoked( - 'terminated', True, string(exc), False) - send_failed_event = False # already sent revoked event - elif isinstance(exc, WorkerLostError) or not return_ok: - self.task.backend.mark_as_failure( - self.id, exc, request=self, store_result=self.store_errors, - ) - # (acks_late) acknowledge after result stored. - if self.task.acks_late: - requeue = self.delivery_info.get('redelivered', None) is False - reject = ( - self.task.reject_on_worker_lost and - isinstance(exc, WorkerLostError) - ) - if reject: - self.reject(requeue=requeue) - send_failed_event = False - else: - self.acknowledge() - - if send_failed_event: - self.send_event( - 'task-failed', - exception=safe_repr(get_pickled_exception(exc_info.exception)), - traceback=exc_info.traceback, - ) - - if not return_ok: - error('Task handler raised error: %r', exc, - exc_info=exc_info.exc_info) - - def acknowledge(self): - """Acknowledge task.""" - if not self.acknowledged: - self.on_ack(logger, self.connection_errors) - self.acknowledged = True - - def reject(self, requeue=False): - if not self.acknowledged: - self.on_reject(logger, self.connection_errors, requeue) - self.acknowledged = True - self.send_event('task-rejected', requeue=requeue) - - def info(self, safe=False): - return { - 'id': self.id, - 'name': self.name, - 'args': self.argsrepr, - 'kwargs': self.kwargsrepr, - 'type': self.type, - 'body': self.body, - 'hostname': self.hostname, - 'time_start': self.time_start, - 'acknowledged': self.acknowledged, - 'delivery_info': self.delivery_info, - 'worker_pid': self.worker_pid, - } - - def __str__(self): - return ' '.join([ - self.humaninfo(), - ' ETA:[{0}]'.format(self.eta) if self.eta else '', - ' expires:[{0}]'.format(self.expires) if self.expires else '', - ]) - - def humaninfo(self): - return '{0.name}[{0.id}]'.format(self) - - def __repr__(self): - return '<{0}: {1} {2} {3}>'.format( - type(self).__name__, self.humaninfo(), - self.argsrepr, self.kwargsrepr, - ) - - @property - def tzlocal(self): - if self._tzlocal is None: - self._tzlocal = self.app.conf.timezone - return self._tzlocal - - @property - def store_errors(self): - return (not self.task.ignore_result or - self.task.store_errors_even_if_ignored) - - @property - def task_id(self): - # XXX compat - return self.id - - @task_id.setter # noqa - def task_id(self, value): - self.id = value - - @property - def task_name(self): - # XXX compat - return self.name - - @task_name.setter # noqa - def task_name(self, value): - self.name = value - - @property - def reply_to(self): - # used by rpc backend when failures reported by parent process - return self.request_dict['reply_to'] - - @property - def correlation_id(self): - # used 
similarly to reply_to - return self.request_dict['correlation_id'] - - @cached_property - def _payload(self): - return self.body if self._decoded else self.message.payload - - @cached_property - def chord(self): - # used by backend.mark_as_failure when failure is reported - # by parent process - _, _, embed = self._payload - return embed.get('chord') - - @cached_property - def errbacks(self): - # used by backend.mark_as_failure when failure is reported - # by parent process - _, _, embed = self._payload - return embed.get('errbacks') - - @cached_property - def group(self): - # used by backend.on_chord_part_return when failures reported - # by parent process - return self.request_dict['group'] - - -def create_request_cls(base, task, pool, hostname, eventer, - ref=ref, revoked_tasks=revoked_tasks, - task_ready=task_ready): - from celery.app.trace import trace_task_ret as trace - default_time_limit = task.time_limit - default_soft_time_limit = task.soft_time_limit - apply_async = pool.apply_async - acks_late = task.acks_late - events = eventer and eventer.enabled - - class Request(base): - - def execute_using_pool(self, pool, **kwargs): - task_id = self.id - if (self.expires or task_id in revoked_tasks) and self.revoked(): - raise TaskRevokedError(task_id) - - time_limit, soft_time_limit = self.time_limits - result = apply_async( - trace, - args=(self.type, task_id, self.request_dict, self.body, - self.content_type, self.content_encoding), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_time_limit or default_soft_time_limit, - timeout=time_limit or default_time_limit, - correlation_id=task_id, - ) - # cannot create weakref to None - self._apply_result = maybe(ref, result) - return result - - def on_success(self, failed__retval__runtime, **kwargs): - failed, retval, runtime = failed__retval__runtime - if failed: - if isinstance(retval.exception, ( - SystemExit, KeyboardInterrupt)): - raise retval.exception - return self.on_failure(retval, return_ok=True) - task_ready(self) - - if acks_late: - self.acknowledge() - - if events: - self.send_event( - 'task-succeeded', result=retval, runtime=runtime, - ) - - return Request diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/state.py b/thesisenv/lib/python3.6/site-packages/celery/worker/state.py deleted file mode 100644 index 1aa4cbc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/state.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.state - ~~~~~~~~~~~~~~~~~~~ - - Internal worker state (global) - - This includes the currently active and reserved tasks, - statistics, and revoked tasks. - -""" -from __future__ import absolute_import - -import os -import sys -import platform -import shelve -import zlib - -from kombu.serialization import pickle, pickle_protocol -from kombu.utils import cached_property - -from celery import __version__ -from celery.datastructures import LimitedSet -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.five import Counter - -__all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests', - 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', - 'task_accepted', 'task_ready', 'task_reserved', 'task_ready', - 'Persistent'] - -#: Worker software/platform information. -SOFTWARE_INFO = {'sw_ident': 'py-celery', - 'sw_ver': __version__, - 'sw_sys': platform.system()} - -#: maximum number of revokes to keep in memory. 
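-#: With the defaults below this means at most 50 000 task ids are kept,
-#: and once the set is full each entry expires after three hours
-#: (10800 s) -- see the ``revoked`` LimitedSet further down.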
-REVOKES_MAX = 50000 - -#: how many seconds a revoke will be active before -#: being expired when the max limit has been exceeded. -REVOKE_EXPIRES = 10800 - -#: set of all reserved :class:`~celery.worker.job.Request`'s. -reserved_requests = set() - -#: set of currently active :class:`~celery.worker.job.Request`'s. -active_requests = set() - -#: count of tasks accepted by the worker, sorted by type. -total_count = Counter() - -#: count of all tasks accepted by the worker -all_total_count = [0] - -#: the list of currently revoked tasks. Persistent if statedb set. -revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) - -#: Update global state when a task has been reserved. -task_reserved = reserved_requests.add - -should_stop = False -should_terminate = False - - -def reset_state(): - reserved_requests.clear() - active_requests.clear() - total_count.clear() - all_total_count[:] = [0] - revoked.clear() - - -def maybe_shutdown(): - if should_stop: - raise WorkerShutdown() - elif should_terminate: - raise WorkerTerminate() - - -def task_accepted(request, _all_total_count=all_total_count): - """Updates global state when a task has been accepted.""" - active_requests.add(request) - total_count[request.name] += 1 - all_total_count[0] += 1 - - -def task_ready(request): - """Updates global state when a task is ready.""" - active_requests.discard(request) - reserved_requests.discard(request) - - -C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH') -C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or - os.environ.get('CELERY_BENCH_EVERY') or 1000) -if C_BENCH: # pragma: no cover - import atexit - - from billiard import current_process - from celery.five import monotonic - from celery.utils.debug import memdump, sample_mem - - all_count = 0 - bench_first = None - bench_start = None - bench_last = None - bench_every = C_BENCH_EVERY - bench_sample = [] - __reserved = task_reserved - __ready = task_ready - - if current_process()._name == 'MainProcess': - @atexit.register - def on_shutdown(): - if bench_first is not None and bench_last is not None: - print('- Time spent in benchmark: {0!r}'.format( - bench_last - bench_first)) - print('- Avg: {0}'.format( - sum(bench_sample) / len(bench_sample))) - memdump() - - def task_reserved(request): # noqa - global bench_start - global bench_first - now = None - if bench_start is None: - bench_start = now = monotonic() - if bench_first is None: - bench_first = now - - return __reserved(request) - - def task_ready(request): # noqa - global all_count - global bench_start - global bench_last - all_count += 1 - if not all_count % bench_every: - now = monotonic() - diff = now - bench_start - print('- Time spent processing {0} tasks (since first ' - 'task received): ~{1:.4f}s\n'.format(bench_every, diff)) - sys.stdout.flush() - bench_start = bench_last = now - bench_sample.append(diff) - sample_mem() - return __ready(request) - - -class Persistent(object): - """This is the persistent data stored by the worker when - :option:`--statedb` is enabled. - - It currently only stores revoked task id's. 
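-
-    Roughly, with :option:`--statedb=/var/run/celery/worker.state` the
-    worker does the equivalent of (path illustrative)::
-
-        state = Persistent(worker_state, '/var/run/celery/worker.state')
-        # ... revoked ids accumulate in worker_state.revoked ...
-        state.save()  # zlib-compress + pickle the set, then close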
- - """ - storage = shelve - protocol = pickle_protocol - compress = zlib.compress - decompress = zlib.decompress - _is_open = False - - def __init__(self, state, filename, clock=None): - self.state = state - self.filename = filename - self.clock = clock - self.merge() - - def open(self): - return self.storage.open( - self.filename, protocol=self.protocol, writeback=True, - ) - - def merge(self): - self._merge_with(self.db) - - def sync(self): - self._sync_with(self.db) - self.db.sync() - - def close(self): - if self._is_open: - self.db.close() - self._is_open = False - - def save(self): - self.sync() - self.close() - - def _merge_with(self, d): - self._merge_revoked(d) - self._merge_clock(d) - return d - - def _sync_with(self, d): - self._revoked_tasks.purge() - d.update( - __proto__=3, - zrevoked=self.compress(self._dumps(self._revoked_tasks)), - clock=self.clock.forward() if self.clock else 0, - ) - return d - - def _merge_clock(self, d): - if self.clock: - d['clock'] = self.clock.adjust(d.get('clock') or 0) - - def _merge_revoked(self, d): - try: - self._merge_revoked_v3(d['zrevoked']) - except KeyError: - try: - self._merge_revoked_v2(d.pop('revoked')) - except KeyError: - pass - # purge expired items at boot - self._revoked_tasks.purge() - - def _merge_revoked_v3(self, zrevoked): - if zrevoked: - self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) - - def _merge_revoked_v2(self, saved): - if not isinstance(saved, LimitedSet): - # (pre 3.0.18) used to be stored as a dict - return self._merge_revoked_v1(saved) - self._revoked_tasks.update(saved) - - def _merge_revoked_v1(self, saved): - add = self._revoked_tasks.add - for item in saved: - add(item) - - def _dumps(self, obj): - return pickle.dumps(obj, protocol=self.protocol) - - @property - def _revoked_tasks(self): - return self.state.revoked - - @cached_property - def db(self): - self._is_open = True - return self.open() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py b/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py deleted file mode 100644 index da69b43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.strategy - ~~~~~~~~~~~~~~~~~~~~~~ - - Task execution strategy (optimization). 
- -""" -from __future__ import absolute_import - -import logging - -from kombu.async.timer import to_timestamp -from kombu.utils.encoding import safe_repr - -from celery.utils.log import get_logger -from celery.utils.timeutils import timezone - -from .job import Request -from .state import task_reserved - -__all__ = ['default'] - -logger = get_logger(__name__) - - -def default(task, app, consumer, - info=logger.info, error=logger.error, task_reserved=task_reserved, - to_system_tz=timezone.to_system): - Req = Request - hostname = consumer.hostname - connection_errors = consumer.connection_errors - _does_info = logger.isEnabledFor(logging.INFO) - - # task event related - # (optimized to avoid calling request.send_event) - eventer = consumer.event_dispatcher - events = eventer and eventer.enabled - send_event = eventer.send - task_sends_events = events and task.send_events - - call_at = consumer.timer.call_at - apply_eta_task = consumer.apply_eta_task - rate_limits_enabled = not consumer.disable_rate_limits - get_bucket = consumer.task_buckets.__getitem__ - handle = consumer.on_task_request - limit_task = consumer._limit_task - - def task_message_handler(message, body, ack, reject, callbacks, - to_timestamp=to_timestamp): - req = Req(body, on_ack=ack, on_reject=reject, - app=app, hostname=hostname, - eventer=eventer, task=task, - connection_errors=connection_errors, - message=message) - if req.revoked(): - return - - if _does_info: - info('Received task: %s', req) - - if task_sends_events: - send_event( - 'task-received', - uuid=req.id, name=req.name, - args=safe_repr(req.args), kwargs=safe_repr(req.kwargs), - retries=req.request_dict.get('retries', 0), - eta=req.eta and req.eta.isoformat(), - expires=req.expires and req.expires.isoformat(), - ) - - if req.eta: - try: - if req.utc: - eta = to_timestamp(to_system_tz(req.eta)) - else: - eta = to_timestamp(req.eta, timezone.local) - except OverflowError as exc: - error("Couldn't convert eta %s to timestamp: %r. Task: %r", - req.eta, exc, req.info(safe=True), exc_info=True) - req.acknowledge() - else: - consumer.qos.increment_eventually() - call_at(eta, apply_eta_task, (req, ), priority=6) - else: - if rate_limits_enabled: - bucket = get_bucket(task.name) - if bucket: - return limit_task(req, bucket, 1) - task_reserved(req) - if callbacks: - [callback() for callback in callbacks] - handle(req) - - return task_message_handler diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py deleted file mode 100644 index 64039d3..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Old django celery integration project.""" -# :copyright: (c) 2009 - 2015 by Ask Solem. -# :license: BSD, see LICENSE for more details. 
-from __future__ import absolute_import, unicode_literals
-
-import os
-import sys
-
-VERSION = (3, 2, 2)
-__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
-__author__ = 'Ask Solem'
-__contact__ = 'ask@celeryproject.org'
-__homepage__ = 'http://celeryproject.org'
-__docformat__ = 'restructuredtext'
-__license__ = 'BSD (3 clause)'
-
-# -eof meta-
-
-
-if sys.version_info[0] == 3:
-
-    def setup_loader():
-        os.environ.setdefault(
-            'CELERY_LOADER', 'djcelery.loaders.DjangoLoader',
-        )
-
-else:
-
-    def setup_loader():  # noqa
-        os.environ.setdefault(
-            b'CELERY_LOADER', b'djcelery.loaders.DjangoLoader',
-        )
-
-from celery import current_app as celery  # noqa
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/admin.py b/thesisenv/lib/python3.6/site-packages/djcelery/admin.py
deleted file mode 100644
index 874bd70..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/admin.py
+++ /dev/null
@@ -1,385 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from anyjson import loads
-
-from django import forms
-from django.conf import settings
-from django.contrib import admin
-from django.contrib.admin import helpers
-from django.contrib.admin.views import main as main_views
-from django.forms.widgets import Select
-from django.shortcuts import render_to_response
-from django.template import RequestContext
-from django.utils.html import escape
-from django.utils.translation import ugettext_lazy as _
-
-from celery import current_app
-from celery import states
-from celery.task.control import broadcast, revoke, rate_limit
-from celery.utils import cached_property
-from celery.utils.text import abbrtask
-
-from .admin_utils import action, display_field, fixedwidth
-from .models import (
-    TaskState, WorkerState,
-    PeriodicTask, IntervalSchedule, CrontabSchedule,
-    PeriodicTasks
-)
-from .humanize import naturaldate
-from .utils import is_database_scheduler, make_aware
-
-try:
-    from django.utils.encoding import force_text
-except ImportError:
-    from django.utils.encoding import force_unicode as force_text  # noqa
-
-
-TASK_STATE_COLORS = {states.SUCCESS: 'green',
-                     states.FAILURE: 'red',
-                     states.REVOKED: 'magenta',
-                     states.STARTED: 'yellow',
-                     states.RETRY: 'orange',
-                     'RECEIVED': 'blue'}
-NODE_STATE_COLORS = {'ONLINE': 'green',
-                     'OFFLINE': 'gray'}
-
-
-class MonitorList(main_views.ChangeList):
-
-    def __init__(self, *args, **kwargs):
-        super(MonitorList, self).__init__(*args, **kwargs)
-        self.title = self.model_admin.list_page_title
-
-
-@display_field(_('state'), 'state')
-def colored_state(task):
-    state = escape(task.state)
-    color = TASK_STATE_COLORS.get(task.state, 'black')
-    return '<b><span style="color: {0};">{1}</span></b>'.format(color, state)
-
-
-@display_field(_('state'), 'last_heartbeat')
-def node_state(node):
-    state = node.is_alive() and 'ONLINE' or 'OFFLINE'
-    color = NODE_STATE_COLORS[state]
-    return '<b><span style="color: {0};">{1}</span></b>'.format(color, state)
-
-
-@display_field(_('ETA'), 'eta')
-def eta(task):
-    if not task.eta:
-        return '<span style="color: gray;">none</span>'
-    return escape(make_aware(task.eta))
-
-
-@display_field(_('when'), 'tstamp')
-def tstamp(task):
-    # convert to local timezone
-    value = make_aware(task.tstamp)
-    return '<div title="{0}">{1}</div>'.format(
-        escape(str(value)), escape(naturaldate(value)),
-    )
-
-
-@display_field(_('name'), 'name')
-def name(task):
-    short_name = abbrtask(task.name, 16)
-    return '<div title="{0}"><b>{1}</b></div>
'.format( - escape(task.name), escape(short_name), - ) - - -class ModelMonitor(admin.ModelAdmin): - can_add = False - can_delete = False - - def get_changelist(self, request, **kwargs): - return MonitorList - - def change_view(self, request, object_id, extra_context=None): - extra_context = extra_context or {} - extra_context.setdefault('title', self.detail_title) - return super(ModelMonitor, self).change_view( - request, object_id, extra_context=extra_context, - ) - - def has_delete_permission(self, request, obj=None): - if not self.can_delete: - return False - return super(ModelMonitor, self).has_delete_permission(request, obj) - - def has_add_permission(self, request): - if not self.can_add: - return False - return super(ModelMonitor, self).has_add_permission(request) - - -class TaskMonitor(ModelMonitor): - detail_title = _('Task detail') - list_page_title = _('Tasks') - rate_limit_confirmation_template = 'djcelery/confirm_rate_limit.html' - date_hierarchy = 'tstamp' - fieldsets = ( - (None, { - 'fields': ('state', 'task_id', 'name', 'args', 'kwargs', - 'eta', 'runtime', 'worker', 'tstamp'), - 'classes': ('extrapretty', ), - }), - ('Details', { - 'classes': ('collapse', 'extrapretty'), - 'fields': ('result', 'traceback', 'expires'), - }), - ) - list_display = ( - fixedwidth('task_id', name=_('UUID'), pt=8), - colored_state, - name, - fixedwidth('args', pretty=True), - fixedwidth('kwargs', pretty=True), - eta, - tstamp, - 'worker', - ) - readonly_fields = ( - 'state', 'task_id', 'name', 'args', 'kwargs', - 'eta', 'runtime', 'worker', 'result', 'traceback', - 'expires', 'tstamp', - ) - list_filter = ('state', 'name', 'tstamp', 'eta', 'worker') - search_fields = ('name', 'task_id', 'args', 'kwargs', 'worker__hostname') - actions = ['revoke_tasks', - 'terminate_tasks', - 'kill_tasks', - 'rate_limit_tasks'] - - class Media: - css = {'all': ('djcelery/style.css', )} - - @action(_('Revoke selected tasks')) - def revoke_tasks(self, request, queryset): - with current_app.default_connection() as connection: - for state in queryset: - revoke(state.task_id, connection=connection) - - @action(_('Terminate selected tasks')) - def terminate_tasks(self, request, queryset): - with current_app.default_connection() as connection: - for state in queryset: - revoke(state.task_id, connection=connection, terminate=True) - - @action(_('Kill selected tasks')) - def kill_tasks(self, request, queryset): - with current_app.default_connection() as connection: - for state in queryset: - revoke(state.task_id, connection=connection, - terminate=True, signal='KILL') - - @action(_('Rate limit selected tasks')) - def rate_limit_tasks(self, request, queryset): - tasks = set([task.name for task in queryset]) - opts = self.model._meta - app_label = opts.app_label - if request.POST.get('post'): - rate = request.POST['rate_limit'] - with current_app.default_connection() as connection: - for task_name in tasks: - rate_limit(task_name, rate, connection=connection) - return None - - context = { - 'title': _('Rate limit selection'), - 'queryset': queryset, - 'object_name': force_text(opts.verbose_name), - 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME, - 'opts': opts, - 'app_label': app_label, - } - - return render_to_response( - self.rate_limit_confirmation_template, context, - context_instance=RequestContext(request), - ) - - def get_actions(self, request): - actions = super(TaskMonitor, self).get_actions(request) - actions.pop('delete_selected', None) - return actions - - def get_queryset(self, request): - qs = 
super(TaskMonitor, self).get_queryset(request) - return qs.select_related('worker') - - -class WorkerMonitor(ModelMonitor): - can_add = True - detail_title = _('Node detail') - list_page_title = _('Worker Nodes') - list_display = ('hostname', node_state) - readonly_fields = ('last_heartbeat', ) - actions = ['shutdown_nodes', - 'enable_events', - 'disable_events'] - - @action(_('Shutdown selected worker nodes')) - def shutdown_nodes(self, request, queryset): - broadcast('shutdown', destination=[n.hostname for n in queryset]) - - @action(_('Enable event mode for selected nodes.')) - def enable_events(self, request, queryset): - broadcast('enable_events', - destination=[n.hostname for n in queryset]) - - @action(_('Disable event mode for selected nodes.')) - def disable_events(self, request, queryset): - broadcast('disable_events', - destination=[n.hostname for n in queryset]) - - def get_actions(self, request): - actions = super(WorkerMonitor, self).get_actions(request) - actions.pop('delete_selected', None) - return actions - - -admin.site.register(TaskState, TaskMonitor) -admin.site.register(WorkerState, WorkerMonitor) - - -# ### Periodic Tasks - - -class TaskSelectWidget(Select): - celery_app = current_app - _choices = None - - def tasks_as_choices(self): - _ = self._modules # noqa - tasks = list(sorted(name for name in self.celery_app.tasks - if not name.startswith('celery.'))) - return (('', ''), ) + tuple(zip(tasks, tasks)) - - @property - def choices(self): - if self._choices is None: - self._choices = self.tasks_as_choices() - return self._choices - - @choices.setter - def choices(self, _): - # ChoiceField.__init__ sets ``self.choices = choices`` - # which would override ours. - pass - - @cached_property - def _modules(self): - self.celery_app.loader.import_default_modules() - - -class TaskChoiceField(forms.ChoiceField): - widget = TaskSelectWidget - - def valid_value(self, value): - return True - - -class PeriodicTaskForm(forms.ModelForm): - regtask = TaskChoiceField(label=_('Task (registered)'), - required=False) - task = forms.CharField(label=_('Task (custom)'), required=False, - max_length=200) - - class Meta: - model = PeriodicTask - exclude = () - - def clean(self): - data = super(PeriodicTaskForm, self).clean() - regtask = data.get('regtask') - if regtask: - data['task'] = regtask - if not data['task']: - exc = forms.ValidationError(_('Need name of task')) - self._errors['task'] = self.error_class(exc.messages) - raise exc - return data - - def _clean_json(self, field): - value = self.cleaned_data[field] - try: - loads(value) - except ValueError as exc: - raise forms.ValidationError( - _('Unable to parse JSON: %s') % exc, - ) - return value - - def clean_args(self): - return self._clean_json('args') - - def clean_kwargs(self): - return self._clean_json('kwargs') - - -class PeriodicTaskAdmin(admin.ModelAdmin): - form = PeriodicTaskForm - model = PeriodicTask - list_display = ( - 'enabled', - '__unicode__', - 'task', - 'args', - 'kwargs', - ) - search_fields = ('name', 'task') - list_display_links = ('enabled', '__unicode__', 'task') - ordering = ('-enabled', 'name') - fieldsets = ( - (None, { - 'fields': ('name', 'regtask', 'task', 'enabled'), - 'classes': ('extrapretty', 'wide'), - }), - ('Schedule', { - 'fields': ('interval', 'crontab'), - 'classes': ('extrapretty', 'wide', ), - }), - ('Arguments', { - 'fields': ('args', 'kwargs'), - 'classes': ('extrapretty', 'wide', 'collapse'), - }), - ('Execution Options', { - 'fields': ('expires', 'queue', 'exchange', 'routing_key'), - 
'classes': ('extrapretty', 'wide', 'collapse'),
-        }),
-    )
-    actions = ['enable_tasks',
-               'disable_tasks']
-
-    def update_periodic_tasks(self):
-        dummy_periodic_task = PeriodicTask()
-        dummy_periodic_task.no_changes = False
-        PeriodicTasks.changed(dummy_periodic_task)
-
-    @action(_('Enable selected periodic tasks'))
-    def enable_tasks(self, request, queryset):
-        queryset.update(enabled=True)
-        self.update_periodic_tasks()
-
-    @action(_('Disable selected periodic tasks'))
-    def disable_tasks(self, request, queryset):
-        queryset.update(enabled=False)
-        self.update_periodic_tasks()
-
-    def changelist_view(self, request, extra_context=None):
-        extra_context = extra_context or {}
-        scheduler = getattr(settings, 'CELERYBEAT_SCHEDULER', None)
-        extra_context['wrong_scheduler'] = not is_database_scheduler(scheduler)
-        return super(PeriodicTaskAdmin, self).changelist_view(request,
-                                                              extra_context)
-
-    def get_queryset(self, request):
-        qs = super(PeriodicTaskAdmin, self).get_queryset(request)
-        return qs.select_related('interval', 'crontab')
-
-
-admin.site.register(IntervalSchedule)
-admin.site.register(CrontabSchedule)
-admin.site.register(PeriodicTask, PeriodicTaskAdmin)
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py
deleted file mode 100644
index da5b39e..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from pprint import pformat
-
-from django.utils.html import escape
-
-FIXEDWIDTH_STYLE = '''\
-<span title="{0}" style="font-size: {1}pt; \
-font-family: Menlo, Courier; ">{2}</span> \
-'''
-
-
-def attrs(**kwargs):
-    def _inner(fun):
-        for attr_name, attr_value in kwargs.items():
-            setattr(fun, attr_name, attr_value)
-        return fun
-    return _inner
-
-
-def display_field(short_description, admin_order_field,
-                  allow_tags=True, **kwargs):
-    return attrs(short_description=short_description,
-                 admin_order_field=admin_order_field,
-                 allow_tags=allow_tags, **kwargs)
-
-
-def action(short_description, **kwargs):
-    return attrs(short_description=short_description, **kwargs)
-
-
-def fixedwidth(field, name=None, pt=6, width=16, maxlen=64, pretty=False):
-
-    @display_field(name or field, field)
-    def f(task):
-        val = getattr(task, field)
-        if pretty:
-            val = pformat(val, width=width)
-        if val.startswith("u'") or val.startswith('u"'):
-            val = val[2:-1]
-        shortval = val.replace(',', ',\n')
-        shortval = shortval.replace('\n', '|br/|')
-
-        if len(shortval) > maxlen:
-            shortval = shortval[:maxlen] + '...'
-        styled = FIXEDWIDTH_STYLE.format(
-            escape(val[:255]), pt, escape(shortval),
-        )
-        return styled.replace('|br/|', '<br/>
') - return f diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/app.py b/thesisenv/lib/python3.6/site-packages/djcelery/app.py deleted file mode 100644 index 7b75759..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/app.py +++ /dev/null @@ -1,7 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from celery import current_app - - -#: The Django-Celery app instance. -app = current_app._get_current_object() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py deleted file mode 100644 index 203326b..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py +++ /dev/null @@ -1,34 +0,0 @@ -"""celery.backends.cache""" -from __future__ import absolute_import, unicode_literals - -from datetime import timedelta - -from django.core.cache import cache, caches - -from celery import current_app -from celery.backends.base import KeyValueStoreBackend - -# CELERY_CACHE_BACKEND overrides the django-global(tm) backend settings. -if current_app.conf.CELERY_CACHE_BACKEND: - cache = caches[current_app.conf.CELERY_CACHE_BACKEND] # noqa - - -class CacheBackend(KeyValueStoreBackend): - """Backend using the Django cache framework to store task metadata.""" - - def __init__(self, *args, **kwargs): - super(CacheBackend, self).__init__(*args, **kwargs) - expires = kwargs.get('expires', - current_app.conf.CELERY_TASK_RESULT_EXPIRES) - if isinstance(expires, timedelta): - expires = int(max(expires.total_seconds(), 0)) - self.expires = expires - - def get(self, key): - return cache.get(key) - - def set(self, key, value): - cache.set(key, value, self.expires) - - def delete(self, key): - cache.delete(key) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py deleted file mode 100644 index 8d640a0..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from celery import current_app -from celery.backends.base import BaseDictBackend - -try: - from celery.utils.timeutils import maybe_timedelta -except ImportError: - from celery.utils.time import maybe_timedelta - -from ..models import TaskMeta, TaskSetMeta - - -class DatabaseBackend(BaseDictBackend): - """The database backend. - - Using Django models to store task state. 
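-
-    Selected with, e.g.::
-
-        CELERY_RESULT_BACKEND = 'database'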
- - """ - TaskModel = TaskMeta - TaskSetModel = TaskSetMeta - - expires = current_app.conf.CELERY_TASK_RESULT_EXPIRES - create_django_tables = True - - subpolling_interval = 0.5 - - def _store_result(self, task_id, result, status, - traceback=None, request=None): - """Store return value and status of an executed task.""" - self.TaskModel._default_manager.store_result( - task_id, result, status, - traceback=traceback, children=self.current_task_children(request), - ) - return result - - def _save_group(self, group_id, result): - """Store the result of an executed group.""" - self.TaskSetModel._default_manager.store_result(group_id, result) - return result - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - return self.TaskModel._default_manager.get_task(task_id).to_dict() - - def _restore_group(self, group_id): - """Get group metadata for a group by id.""" - meta = self.TaskSetModel._default_manager.restore_taskset(group_id) - if meta: - return meta.to_dict() - - def _delete_group(self, group_id): - self.TaskSetModel._default_manager.delete_taskset(group_id) - - def _forget(self, task_id): - try: - self.TaskModel._default_manager.get(task_id=task_id).delete() - except self.TaskModel.DoesNotExist: - pass - - def cleanup(self): - """Delete expired metadata.""" - expires = maybe_timedelta(self.expires) - for model in self.TaskModel, self.TaskSetModel: - model._default_manager.delete_expired(expires) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/common.py b/thesisenv/lib/python3.6/site-packages/djcelery/common.py deleted file mode 100644 index a6535db..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/common.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from contextlib import contextmanager -from functools import wraps - -from django.utils import translation - - -@contextmanager -def respect_language(language): - """Context manager that changes the current translation language for - all code inside the following block. - - Can e.g. be used inside tasks like this:: - - from celery import task - from djcelery.common import respect_language - - @task - def my_task(language=None): - with respect_language(language): - pass - """ - if language: - prev = translation.get_language() - translation.activate(language) - try: - yield - finally: - translation.activate(prev) - else: - yield - - -def respects_language(fun): - """Decorator for tasks with respect to site's current language. - You can use this decorator on your tasks together with default @task - decorator (remember that the task decorator must be applied last). - - See also the with-statement alternative :func:`respect_language`. - - **Example**: - - .. code-block:: python - - @task - @respects_language - def my_task() - # localize something. - - The task will then accept a ``language`` argument that will be - used to set the language in the task, and the task can thus be - called like: - - .. 
code-block:: python - - from django.utils import translation - from myapp.tasks import my_task - - # Pass the current language on to the task - my_task.delay(language=translation.get_language()) - - # or set the language explicitly - my_task.delay(language='no.no') - - """ - - @wraps(fun) - def _inner(*args, **kwargs): - with respect_language(kwargs.pop('language', None)): - return fun(*args, **kwargs) - return _inner diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/compat.py b/thesisenv/lib/python3.6/site-packages/djcelery/compat.py deleted file mode 100644 index fc797a3..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/compat.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import absolute_import - -import os -import sys - -from kombu.utils.encoding import bytes_to_str, str_to_bytes - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - - -def python_2_unicode_compatible(cls): - """Taken from Django project (django/utils/encoding.py) & modified a bit to - always have __unicode__ method available. - """ - if '__str__' not in cls.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - cls.__name__) - - cls.__unicode__ = cls.__str__ - - if PY2: - cls.__str__ = lambda self: self.__unicode__().encode('utf-8') - - return cls - - -if PY3: - unicode = str - - def itervalues(x): - return x.values() - - def setenv(k, v): - os.environ[bytes_to_str(k)] = bytes_to_str(v) -else: - unicode = unicode - - def itervalues(x): # noqa - return x.itervalues() - - def setenv(k, v): # noqa - os.environ[str_to_bytes(k)] = str_to_bytes(v) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/contrib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py b/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py deleted file mode 100644 index e65de29..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from django.conf import settings -try: - from django.test.runner import DiscoverRunner -except ImportError: - from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner - -from celery import current_app -from celery.task import Task -from djcelery.backends.database import DatabaseBackend - - -USAGE = """\ -Custom test runner to allow testing of celery delayed tasks. -""" - - -def _set_eager(): - settings.CELERY_ALWAYS_EAGER = True - current_app.conf.CELERY_ALWAYS_EAGER = True - settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True # Issue #75 - current_app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True - - -class CeleryTestSuiteRunner(DiscoverRunner): - """Django test runner allowing testing of celery delayed tasks. - - All tasks are run locally, not in a worker. - - To use this runner set ``settings.TEST_RUNNER``:: - - TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner' - - """ - def setup_test_environment(self, **kwargs): - _set_eager() - super(CeleryTestSuiteRunner, self).setup_test_environment(**kwargs) - - -class CeleryTestSuiteRunnerStoringResult(DiscoverRunner): - """Django test runner allowing testing of celery delayed tasks, - and storing the results of those tasks in ``TaskMeta``. - - Requires setting CELERY_RESULT_BACKEND = 'database'. 
- - USAGE: - - In ``settings.py``:: - - TEST_RUNNER = ''' - djcelery.contrib.test_runner.CeleryTestSuiteRunnerStoringResult - '''.strip() - - """ - - def setup_test_environment(self, **kwargs): - # Monkey-patch Task.on_success() method - def on_success_patched(self, retval, task_id, args, kwargs): - app = current_app._get_current_object() - DatabaseBackend(app=app).store_result(task_id, retval, 'SUCCESS') - Task.on_success = classmethod(on_success_patched) - - super(CeleryTestSuiteRunnerStoringResult, self).setup_test_environment( - **kwargs - ) - - settings.CELERY_RESULT_BACKEND = 'database' - _set_eager() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/db.py b/thesisenv/lib/python3.6/site-packages/djcelery/db.py deleted file mode 100644 index 2204083..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/db.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import absolute_import - -import django - -from contextlib import contextmanager -from django.db import transaction - -if django.VERSION < (1, 6): # pragma: no cover - - def get_queryset(s): - return s.get_query_set() -else: - def get_queryset(s): # noqa - return s.get_queryset() - -try: - from django.db.transaction import atomic # noqa -except ImportError: # pragma: no cover - - try: - from django.db.transaction import Transaction # noqa - except ImportError: - @contextmanager - def commit_on_success(*args, **kwargs): - try: - transaction.enter_transaction_management(*args, **kwargs) - transaction.managed(True, *args, **kwargs) - try: - yield - except: - if transaction.is_dirty(*args, **kwargs): - transaction.rollback(*args, **kwargs) - raise - else: - if transaction.is_dirty(*args, **kwargs): - try: - transaction.commit(*args, **kwargs) - except: - transaction.rollback(*args, **kwargs) - raise - finally: - transaction.leave_transaction_management(*args, **kwargs) - else: # pragma: no cover - from django.db.transaction import commit_on_success # noqa - - commit_unless_managed = transaction.commit_unless_managed - rollback_unless_managed = transaction.rollback_unless_managed -else: - @contextmanager - def commit_on_success(using=None): # noqa - connection = transaction.get_connection(using) - if connection.features.autocommits_when_autocommit_is_off: - # ignore stupid warnings and errors - yield - else: - with transaction.atomic(using): - yield - - def commit_unless_managed(*args, **kwargs): # noqa - pass - - def rollback_unless_managed(*args, **kwargs): # noqa - pass diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py b/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py deleted file mode 100644 index 74517cc..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from datetime import datetime - -from django.utils.translation import ungettext, ugettext as _ -from .utils import now - - -def pluralize_year(n): - return ungettext(_('{num} year ago'), _('{num} years ago'), n) - - -def pluralize_month(n): - return ungettext(_('{num} month ago'), _('{num} months ago'), n) - - -def pluralize_week(n): - return ungettext(_('{num} week ago'), _('{num} weeks ago'), n) - - -def pluralize_day(n): - return ungettext(_('{num} day ago'), _('{num} days ago'), n) - - -OLDER_CHUNKS = ( - (365.0, pluralize_year), - (30.0, pluralize_month), - (7.0, pluralize_week), - (1.0, pluralize_day), -) - - -def _un(singular__plural, n=None): - singular, plural = singular__plural - return ungettext(singular, 
plural, n) - - -def naturaldate(date, include_seconds=False): - """Convert datetime into a human natural date string.""" - - if not date: - return '' - - right_now = now() - today = datetime(right_now.year, right_now.month, - right_now.day, tzinfo=right_now.tzinfo) - delta = right_now - date - delta_midnight = today - date - - days = delta.days - hours = delta.seconds // 3600 - minutes = delta.seconds // 60 - seconds = delta.seconds - - if days < 0: - return _('just now') - - if days == 0: - if hours == 0: - if minutes > 0: - return ungettext( - _('{minutes} minute ago'), - _('{minutes} minutes ago'), minutes - ).format(minutes=minutes) - else: - if include_seconds and seconds: - return ungettext( - _('{seconds} second ago'), - _('{seconds} seconds ago'), seconds - ).format(seconds=seconds) - return _('just now') - else: - return ungettext( - _('{hours} hour ago'), _('{hours} hours ago'), hours - ).format(hours=hours) - - if delta_midnight.days == 0: - return _('yesterday at {time}').format(time=date.strftime('%H:%M')) - - count = 0 - for chunk, pluralizefun in OLDER_CHUNKS: - if days >= chunk: - count = int(round((delta_midnight.days + 1) / chunk, 0)) - fmt = pluralizefun(count) - return fmt.format(num=count) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py b/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py deleted file mode 100644 index b19e07a..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py +++ /dev/null @@ -1,202 +0,0 @@ -from __future__ import absolute_import - -import os -import imp -import importlib - -from datetime import datetime -from warnings import warn - -from celery import signals -try: - from celery.utils.collections import DictAttribute -except ImportError: - from celery.datastructures import DictAttribute -from celery.loaders.base import BaseLoader - -from django import db -from django.conf import settings -from django.core import cache -from django.core.mail import mail_admins - -from .utils import DATABASE_ERRORS, now - -_RACE_PROTECTION = False - - -def _maybe_close_fd(fh): - try: - os.close(fh.fileno()) - except (AttributeError, OSError, TypeError): - # TypeError added for celery#962 - pass - - -class DjangoLoader(BaseLoader): - """The Django loader.""" - _db_reuse = 0 - - override_backends = { - 'database': 'djcelery.backends.database.DatabaseBackend', - 'cache': 'djcelery.backends.cache.CacheBackend', - } - - def __init__(self, *args, **kwargs): - super(DjangoLoader, self).__init__(*args, **kwargs) - self._install_signal_handlers() - - def _install_signal_handlers(self): - # Need to close any open database connection after - # any embedded celerybeat process forks. - signals.beat_embedded_init.connect(self.close_database) - signals.worker_ready.connect(self.warn_if_debug) - - def now(self, utc=False): - return datetime.utcnow() if utc else now() - - def read_configuration(self): - """Load configuration from Django settings.""" - self.configured = True - # Default backend needs to be the database backend for backward - # compatibility. 
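-        # i.e. when settings define neither CELERY_RESULT_BACKEND nor the
-        # old-style CELERY_BACKEND, fall back to the ORM result backend: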
- backend = (getattr(settings, 'CELERY_RESULT_BACKEND', None) or - getattr(settings, 'CELERY_BACKEND', None)) - if not backend: - settings.CELERY_RESULT_BACKEND = 'database' - return DictAttribute(settings) - - def _close_database(self): - try: - funs = [conn.close for conn in db.connections] - except AttributeError: - if hasattr(db, 'close_old_connections'): # Django 1.6+ - funs = [db.close_old_connections] - else: - funs = [db.close_connection] # pre multidb - - for close in funs: - try: - close() - except DATABASE_ERRORS as exc: - str_exc = str(exc) - if 'closed' not in str_exc and 'not connected' not in str_exc: - raise - - def close_database(self, **kwargs): - db_reuse_max = self.conf.get('CELERY_DB_REUSE_MAX', None) - if not db_reuse_max: - return self._close_database() - if self._db_reuse >= db_reuse_max * 2: - self._db_reuse = 0 - self._close_database() - self._db_reuse += 1 - - def close_cache(self): - try: - cache.cache.close() - except (TypeError, AttributeError): - pass - - def on_process_cleanup(self): - """Does everything necessary for Django to work in a long-living, - multiprocessing environment. - - """ - # See http://groups.google.com/group/django-users/ - # browse_thread/thread/78200863d0c07c6d/ - self.close_database() - self.close_cache() - - def on_task_init(self, task_id, task): - """Called before every task.""" - try: - is_eager = task.request.is_eager - except AttributeError: - is_eager = False - if not is_eager: - self.close_database() - - def on_worker_init(self): - """Called when the worker starts. - - Automatically discovers any ``tasks.py`` files in the applications - listed in ``INSTALLED_APPS``. - - """ - self.import_default_modules() - - self.close_database() - self.close_cache() - - def warn_if_debug(self, **kwargs): - if settings.DEBUG: - warn('Using settings.DEBUG leads to a memory leak, never ' - 'use this setting in production environments!') - - def import_default_modules(self): - super(DjangoLoader, self).import_default_modules() - self.autodiscover() - - def autodiscover(self): - self.task_modules.update(mod.__name__ for mod in autodiscover() or ()) - - def on_worker_process_init(self): - # the parent process may have established these, - # so need to close them. - - # calling db.close() on some DB connections will cause - # the inherited DB conn to also get broken in the parent - # process so we need to remove it without triggering any - # network IO that close() might cause. 
- try: - for c in db.connections.all(): - if c and c.connection: - _maybe_close_fd(c.connection) - except AttributeError: - if db.connection and db.connection.connection: - _maybe_close_fd(db.connection.connection) - - # use the _ version to avoid DB_REUSE preventing the conn.close() call - self._close_database() - self.close_cache() - - def mail_admins(self, subject, body, fail_silently=False, **kwargs): - return mail_admins(subject, body, fail_silently=fail_silently) - - -def autodiscover(): - """Include tasks for all applications in ``INSTALLED_APPS``.""" - global _RACE_PROTECTION - - if _RACE_PROTECTION: - return - _RACE_PROTECTION = True - try: - return filter(None, [find_related_module(app, 'tasks') - for app in settings.INSTALLED_APPS]) - finally: - _RACE_PROTECTION = False - - -def find_related_module(app, related_name): - """Given an application name and a module name, tries to find that - module in the application.""" - - try: - app_path = importlib.import_module(app).__path__ - except ImportError as exc: - warn('Autodiscover: Error importing %s.%s: %r' % ( - app, related_name, exc, - )) - return - except AttributeError: - return - - try: - f, _, _ = imp.find_module(related_name, app_path) - # f is returned None when app_path is a module - f and f.close() - except ImportError: - return - - return importlib.import_module('{0}.{1}'.format(app, related_name)) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py deleted file mode 100644 index c999477..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py +++ /dev/null @@ -1,142 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import celery -import djcelery -import sys - -from django.core.management.base import BaseCommand - -from djcelery.compat import setenv - -DB_SHARED_THREAD = """\ -DatabaseWrapper objects created in a thread can only \ -be used in that same thread. The object with alias '{0}' \ -was created in thread id {1} and this is thread id {2}.\ -""" - - -def patch_thread_ident(): - # monkey patch django. - # This patch make sure that we use real threads to get the ident which - # is going to happen if we are using gevent or eventlet. 
- # -- patch taken from gunicorn - if getattr(patch_thread_ident, 'called', False): - return - try: - from django.db.backends import BaseDatabaseWrapper, DatabaseError - - if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__: - import thread - _get_ident = thread.get_ident - - __old__init__ = BaseDatabaseWrapper.__init__ - - def _init(self, *args, **kwargs): - __old__init__(self, *args, **kwargs) - self._thread_ident = _get_ident() - - def _validate_thread_sharing(self): - if (not self.allow_thread_sharing and - self._thread_ident != _get_ident()): - raise DatabaseError( - DB_SHARED_THREAD % ( - self.alias, self._thread_ident, _get_ident()), - ) - - BaseDatabaseWrapper.__init__ = _init - BaseDatabaseWrapper.validate_thread_sharing = \ - _validate_thread_sharing - - patch_thread_ident.called = True - except ImportError: - pass - - -patch_thread_ident() - - -class CeleryCommand(BaseCommand): - options = () - if hasattr(BaseCommand, 'option_list'): - options = BaseCommand.option_list - else: - def add_arguments(self, parser): - option_typemap = { - "string": str, - "int": int, - "float": float - } - for opt in self.option_list: - option = {k: v - for k, v in opt.__dict__.items() - if v is not None} - flags = (option.get("_long_opts", []) + - option.get("_short_opts", [])) - if option.get('default') == ('NO', 'DEFAULT'): - option['default'] = None - if option.get("nargs") == 1: - del option["nargs"] - del option["_long_opts"] - del option["_short_opts"] - if "type" in option: - opttype = option["type"] - option["type"] = option_typemap.get(opttype, opttype) - parser.add_argument(*flags, **option) - - skip_opts = ['--app', '--loader', '--config', '--no-color'] - requires_system_checks = False - keep_base_opts = False - stdout, stderr = sys.stdout, sys.stderr - - def get_version(self): - return 'celery {c.__version__}\ndjango-celery {d.__version__}'.format( - c=celery, d=djcelery, - ) - - def execute(self, *args, **options): - broker = options.get('broker') - if broker: - self.set_broker(broker) - super(CeleryCommand, self).execute(*args, **options) - - def set_broker(self, broker): - setenv('CELERY_BROKER_URL', broker) - - def run_from_argv(self, argv): - self.handle_default_options(argv[2:]) - return super(CeleryCommand, self).run_from_argv(argv) - - def handle_default_options(self, argv): - acc = [] - broker = None - for i, arg in enumerate(argv): - # --settings and --pythonpath are also handled - # by BaseCommand.handle_default_options, but that is - # called with the resulting options parsed by optparse. 
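-            # e.g. ``manage.py celery worker --settings=proj.settings -b amqp://``
-            # (values illustrative) must export DJANGO_SETTINGS_MODULE and
-            # pass the broker to set_broker() before celery's own parser
-            # sees the arguments.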
- if '--settings=' in arg: - _, settings_module = arg.split('=') - setenv('DJANGO_SETTINGS_MODULE', settings_module) - elif '--pythonpath=' in arg: - _, pythonpath = arg.split('=') - sys.path.insert(0, pythonpath) - elif '--broker=' in arg: - _, broker = arg.split('=') - elif arg == '-b': - broker = argv[i + 1] - else: - acc.append(arg) - if broker: - self.set_broker(broker) - return argv if self.keep_base_opts else acc - - def die(self, msg): - sys.stderr.write(msg) - sys.stderr.write('\n') - sys.exit() - - def _is_unwanted_option(self, option): - return option._long_opts and option._long_opts[0] in self.skip_opts - - @property - def option_list(self): - return [x for x in self.options if not self._is_unwanted_option(x)] diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py deleted file mode 100644 index 6e842d7..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from celery.bin import celery - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -base = celery.CeleryCommand(app=app) - - -class Command(CeleryCommand): - """The celery command.""" - help = 'celery commands, see celery help' - options = (CeleryCommand.options + - base.get_options() + - base.preload_options) - - def run_from_argv(self, argv): - argv = self.handle_default_options(argv) - base.execute_from_commandline( - ['{0[0]} {0[1]}'.format(argv)] + argv[2:], - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py deleted file mode 100644 index e4573dc..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py +++ /dev/null @@ -1,24 +0,0 @@ -""" - -Start the celery clock service from the Django management command. - -""" -from __future__ import absolute_import, unicode_literals - -from celery.bin import beat - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -beat = beat.beat(app=app) - - -class Command(CeleryCommand): - """Run the celery periodic task scheduler.""" - options = (CeleryCommand.options + - beat.get_options() + - beat.preload_options) - help = 'Old alias to the "celery beat" command.' - - def handle(self, *args, **options): - beat.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py deleted file mode 100644 index 2849b44..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py +++ /dev/null @@ -1,26 +0,0 @@ -""" - -Shortcut to the Django snapshot service. 
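-
-Typically started alongside the worker with::
-
-    python manage.py celerycam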
- -""" -from __future__ import absolute_import, unicode_literals - -from celery.bin import events - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -ev = events.events(app=app) - - -class Command(CeleryCommand): - """Run the celery curses event viewer.""" - options = (CeleryCommand.options + - ev.get_options() + - ev.preload_options) - help = 'Takes snapshots of the clusters state to the database.' - - def handle(self, *args, **options): - """Handle the management command.""" - options['camera'] = 'djcelery.snapshot.Camera' - ev.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py deleted file mode 100644 index 0ed4c40..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py +++ /dev/null @@ -1,25 +0,0 @@ -""" - -Start the celery daemon from the Django management command. - -""" -from __future__ import absolute_import, unicode_literals - -from celery.bin import worker - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -worker = worker.worker(app=app) - - -class Command(CeleryCommand): - """Run the celery daemon.""" - help = 'Old alias to the "celery worker" command.' - options = (CeleryCommand.options + - worker.get_options() + - worker.preload_options) - - def handle(self, *args, **options): - worker.check_args(args) - worker.run(**options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py deleted file mode 100644 index 3f2533b..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py +++ /dev/null @@ -1,25 +0,0 @@ -""" - -Start detached worker node from the Django management utility. - -""" -from __future__ import absolute_import, unicode_literals - -import os -import sys - -from celery.bin import celeryd_detach - -from djcelery.management.base import CeleryCommand - - -class Command(CeleryCommand): - """Run the celery daemon.""" - help = 'Runs a detached Celery worker node.' - options = celeryd_detach.OPTION_LIST - - def run_from_argv(self, argv): - - class detached(celeryd_detach.detached_celeryd): - execv_argv = [os.path.abspath(sys.argv[0]), 'celery', 'worker'] - detached().execute_from_commandline(argv) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py deleted file mode 100644 index a852302..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py +++ /dev/null @@ -1,25 +0,0 @@ -""" - -Utility to manage multiple worker instances. - -""" -from __future__ import absolute_import, unicode_literals - -from celery.bin import multi - -from djcelery.management.base import CeleryCommand - - -class Command(CeleryCommand): - """Run the celery daemon.""" - args = '[name1, [name2, [...]> [worker options]' - help = 'Manage multiple Celery worker nodes.' 
- options = () - keep_base_opts = True - - def run_from_argv(self, argv): - argv = self.handle_default_options(argv) - argv.append('--cmd={0[0]} celeryd_detach'.format(argv)) - multi.MultiTool().execute_from_commandline( - ['{0[0]} {0[1]}'.format(argv)] + argv[2:], - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py deleted file mode 100644 index 91317a4..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py +++ /dev/null @@ -1,42 +0,0 @@ -""" - -Start the celery clock service from the Django management command. - -""" -from __future__ import absolute_import, unicode_literals - -import sys - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -try: - from celerymon.bin.celerymon import MonitorCommand - mon = MonitorCommand(app=app) -except ImportError: - mon = None - -MISSING = """ -You don't have celerymon installed, please install it by running the following -command: - - $ pip install -U celerymon - -or if you're still using easy_install (shame on you!) - - $ easy_install -U celerymon -""" - - -class Command(CeleryCommand): - """Run the celery monitor.""" - options = (CeleryCommand.options + - (mon and mon.get_options() + mon.preload_options or ())) - help = 'Run the celery monitor' - - def handle(self, *args, **options): - """Handle the management command.""" - if mon is None: - sys.stderr.write(MISSING) - else: - mon.run(**options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py deleted file mode 100644 index 29a09fc..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import sys -import threading - -from celery.bin import events - -from django.core.management.commands import runserver - -from djcelery.app import app -from djcelery.management.base import CeleryCommand - -ev = events.events(app=app) - - -class WebserverThread(threading.Thread): - - def __init__(self, addrport='', *args, **options): - threading.Thread.__init__(self) - self.addrport = addrport - self.args = args - self.options = options - - def run(self): - options = dict(self.options, use_reloader=False) - command = runserver.Command() - # see http://code.djangoproject.com/changeset/13319 - command.stdout, command.stderr = sys.stdout, sys.stderr - command.handle(self.addrport, *self.args, **options) - - -class Command(CeleryCommand): - """Run the celery curses event viewer.""" - args = '[optional port number, or ipaddr:port]' - options = (runserver.Command.option_list + - ev.get_options() + - ev.preload_options) - help = 'Starts Django Admin instance and celerycam in the same process.' - # see http://code.djangoproject.com/changeset/13319. 
- stdout, stderr = sys.stdout, sys.stderr - - def handle(self, addrport='', *args, **options): - """Handle the management command.""" - server = WebserverThread(addrport, *args, **options) - server.start() - options['camera'] = 'djcelery.snapshot.Camera' - options['prog_name'] = 'djcelerymon' - ev.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/managers.py b/thesisenv/lib/python3.6/site-packages/djcelery/managers.py deleted file mode 100644 index 91dae00..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/managers.py +++ /dev/null @@ -1,243 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import warnings - -from functools import wraps -from itertools import count - -from django.db import connection -try: - from django.db import connections, router -except ImportError: # pre-Django 1.2 - connections = router = None # noqa - -from django.db import models -from django.db.models.query import QuerySet -from django.conf import settings - -try: - from celery.utils.timeutils import maybe_timedelta -except ImportError: - from celery.utils.time import maybe_timedelta - -from .db import commit_on_success, get_queryset, rollback_unless_managed -from .utils import now - - -class TxIsolationWarning(UserWarning): - pass - - -def transaction_retry(max_retries=1): - """Decorator for methods doing database operations. - - If the database operation fails, it will retry the operation - at most ``max_retries`` times. - - """ - def _outer(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - _max_retries = kwargs.pop('exception_retry_count', max_retries) - for retries in count(0): - try: - return fun(*args, **kwargs) - except Exception: # pragma: no cover - # Depending on the database backend used we can experience - # various exceptions. E.g. psycopg2 raises an exception - # if some operation breaks the transaction, so saving - # the task result won't be possible until we rollback - # the transaction. 
- if retries >= _max_retries: - raise - try: - rollback_unless_managed() - except Exception: - pass - return _inner - - return _outer - - -def update_model_with_dict(obj, fields): - [setattr(obj, attr_name, attr_value) - for attr_name, attr_value in fields.items()] - obj.save() - return obj - - -class ExtendedQuerySet(QuerySet): - - def update_or_create(self, **kwargs): - obj, created = self.get_or_create(**kwargs) - - if not created: - fields = dict(kwargs.pop('defaults', {})) - fields.update(kwargs) - update_model_with_dict(obj, fields) - - return obj, created - - -class ExtendedManager(models.Manager): - - def get_queryset(self): - return ExtendedQuerySet(self.model) - get_query_set = get_queryset # Pre django 1.6 - - def update_or_create(self, **kwargs): - return get_queryset(self).update_or_create(**kwargs) - - def connection_for_write(self): - if connections: - return connections[router.db_for_write(self.model)] - return connection - - def connection_for_read(self): - if connections: - return connections[self.db] - return connection - - def current_engine(self): - try: - return settings.DATABASES[self.db]['ENGINE'] - except AttributeError: - return settings.DATABASE_ENGINE - - -class ResultManager(ExtendedManager): - - def get_all_expired(self, expires): - """Get all expired task results.""" - return self.filter(date_done__lt=now() - maybe_timedelta(expires)) - - def delete_expired(self, expires): - """Delete all expired taskset results.""" - meta = self.model._meta - with commit_on_success(): - self.get_all_expired(expires).update(hidden=True) - cursor = self.connection_for_write().cursor() - cursor.execute( - 'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta), - (True, ), - ) - - -class PeriodicTaskManager(ExtendedManager): - - def enabled(self): - return self.filter(enabled=True) - - -class TaskManager(ResultManager): - """Manager for :class:`celery.models.Task` models.""" - _last_id = None - - def get_task(self, task_id): - """Get task meta for task by ``task_id``. - - :keyword exception_retry_count: How many times to retry by - transaction rollback on exception. This could theoretically - happen in a race condition if another worker is trying to - create the same task. The default is to retry once. - - """ - try: - return self.get(task_id=task_id) - except self.model.DoesNotExist: - if self._last_id == task_id: - self.warn_if_repeatable_read() - self._last_id = task_id - return self.model(task_id=task_id) - - @transaction_retry(max_retries=2) - def store_result(self, task_id, result, status, - traceback=None, children=None): - """Store the result and status of a task. - - :param task_id: task id - - :param result: The return value of the task, or an exception - instance raised by the task. - - :param status: Task status. See - :meth:`celery.result.AsyncResult.get_status` for a list of - possible status values. - - :keyword traceback: The traceback at the point of exception (if the - task failed). - - :keyword children: List of serialized results of subtasks - of this task. - - :keyword exception_retry_count: How many times to retry by - transaction rollback on exception. This could theoretically - happen in a race condition if another worker is trying to - create the same task. The default is to retry twice. 
- - """ - return self.update_or_create(task_id=task_id, - defaults={'status': status, - 'result': result, - 'traceback': traceback, - 'meta': {'children': children}}) - - def warn_if_repeatable_read(self): - if 'mysql' in self.current_engine().lower(): - cursor = self.connection_for_read().cursor() - if cursor.execute('SELECT @@tx_isolation'): - isolation = cursor.fetchone()[0] - if isolation == 'REPEATABLE-READ': - warnings.warn(TxIsolationWarning( - 'Polling results with transaction isolation level ' - 'repeatable-read within the same transaction ' - 'may give outdated results. Be sure to commit the ' - 'transaction for each poll iteration.')) - - -class TaskSetManager(ResultManager): - """Manager for :class:`celery.models.TaskSet` models.""" - - def restore_taskset(self, taskset_id): - """Get the async result instance by taskset id.""" - try: - return self.get(taskset_id=taskset_id) - except self.model.DoesNotExist: - pass - - def delete_taskset(self, taskset_id): - """Delete a saved taskset result.""" - s = self.restore_taskset(taskset_id) - if s: - s.delete() - - @transaction_retry(max_retries=2) - def store_result(self, taskset_id, result): - """Store the async result instance of a taskset. - - :param taskset_id: task set id - - :param result: The return value of the taskset - - """ - return self.update_or_create(taskset_id=taskset_id, - defaults={'result': result}) - - -class TaskStateManager(ExtendedManager): - - def active(self): - return self.filter(hidden=False) - - def expired(self, states, expires, nowfun=now): - return self.filter(state__in=states, - tstamp__lte=nowfun() - maybe_timedelta(expires)) - - def expire_by_states(self, states, expires): - if expires is not None: - return self.expired(states, expires).update(hidden=True) - - def purge(self): - with commit_on_success(): - self.model.objects.filter(hidden=True).delete() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py deleted file mode 100644 index 75fe231..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py +++ /dev/null @@ -1,163 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals - -from django.db import models, migrations -import djcelery.picklefield - - -class Migration(migrations.Migration): - - dependencies = [ - ] - - operations = [ - migrations.CreateModel( - name='CrontabSchedule', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('minute', models.CharField(default='*', max_length=64, verbose_name='minute')), - ('hour', models.CharField(default='*', max_length=64, verbose_name='hour')), - ('day_of_week', models.CharField(default='*', max_length=64, verbose_name='day of week')), - ('day_of_month', models.CharField(default='*', max_length=64, verbose_name='day of month')), - ('month_of_year', models.CharField(default='*', max_length=64, verbose_name='month of year')), - ], - options={ - 'ordering': ['month_of_year', 'day_of_month', 'day_of_week', 'hour', 'minute'], - 'verbose_name': 'crontab', - 'verbose_name_plural': 'crontabs', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='IntervalSchedule', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('every', models.IntegerField(verbose_name='every')), - ('period', models.CharField(max_length=24, verbose_name='period', choices=[('days', 'Days'), 
('hours', 'Hours'), ('minutes', 'Minutes'), ('seconds', 'Seconds'), ('microseconds', 'Microseconds')])), - ], - options={ - 'ordering': ['period', 'every'], - 'verbose_name': 'interval', - 'verbose_name_plural': 'intervals', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='PeriodicTask', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('name', models.CharField(help_text='Useful description', unique=True, max_length=200, verbose_name='name')), - ('task', models.CharField(max_length=200, verbose_name='task name')), - ('args', models.TextField(default='[]', help_text='JSON encoded positional arguments', verbose_name='Arguments', blank=True)), - ('kwargs', models.TextField(default='{}', help_text='JSON encoded keyword arguments', verbose_name='Keyword arguments', blank=True)), - ('queue', models.CharField(default=None, max_length=200, blank=True, help_text='Queue defined in CELERY_QUEUES', null=True, verbose_name='queue')), - ('exchange', models.CharField(default=None, max_length=200, null=True, verbose_name='exchange', blank=True)), - ('routing_key', models.CharField(default=None, max_length=200, null=True, verbose_name='routing key', blank=True)), - ('expires', models.DateTimeField(null=True, verbose_name='expires', blank=True)), - ('enabled', models.BooleanField(default=True, verbose_name='enabled')), - ('last_run_at', models.DateTimeField(null=True, editable=False, blank=True)), - ('total_run_count', models.PositiveIntegerField(default=0, editable=False)), - ('date_changed', models.DateTimeField(auto_now=True)), - ('description', models.TextField(verbose_name='description', blank=True)), - ('crontab', models.ForeignKey(blank=True, to='djcelery.CrontabSchedule', help_text='Use one of interval/crontab', null=True, verbose_name='crontab', on_delete=models.CASCADE)), - ('interval', models.ForeignKey(verbose_name='interval', blank=True, to='djcelery.IntervalSchedule', null=True, on_delete=models.CASCADE)), - ], - options={ - 'verbose_name': 'periodic task', - 'verbose_name_plural': 'periodic tasks', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='PeriodicTasks', - fields=[ - ('ident', models.SmallIntegerField(default=1, unique=True, serialize=False, primary_key=True)), - ('last_update', models.DateTimeField()), - ], - options={ - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='TaskMeta', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('task_id', models.CharField(unique=True, max_length=255, verbose_name='task id')), - ('status', models.CharField(default='PENDING', max_length=50, verbose_name='state', choices=[('FAILURE', 'FAILURE'), ('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('RETRY', 'RETRY'), ('REVOKED', 'REVOKED'), ('STARTED', 'STARTED'), ('SUCCESS', 'SUCCESS')])), - ('result', djcelery.picklefield.PickledObjectField(default=None, null=True, editable=False)), - ('date_done', models.DateTimeField(auto_now=True, verbose_name='done at')), - ('traceback', models.TextField(null=True, verbose_name='traceback', blank=True)), - ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), - ('meta', djcelery.picklefield.PickledObjectField(default=None, null=True, editable=False)), - ], - options={ - 'db_table': 'celery_taskmeta', - 'verbose_name': 'task state', - 'verbose_name_plural': 'task states', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='TaskSetMeta', - 
fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('taskset_id', models.CharField(unique=True, max_length=255, verbose_name='group id')), - ('result', djcelery.picklefield.PickledObjectField(editable=False)), - ('date_done', models.DateTimeField(auto_now=True, verbose_name='created at')), - ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), - ], - options={ - 'db_table': 'celery_tasksetmeta', - 'verbose_name': 'saved group result', - 'verbose_name_plural': 'saved group results', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='TaskState', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('state', models.CharField(db_index=True, max_length=64, verbose_name='state', choices=[('FAILURE', 'FAILURE'), ('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('RETRY', 'RETRY'), ('REVOKED', 'REVOKED'), ('STARTED', 'STARTED'), ('SUCCESS', 'SUCCESS')])), - ('task_id', models.CharField(unique=True, max_length=36, verbose_name='UUID')), - ('name', models.CharField(max_length=200, null=True, verbose_name='name', db_index=True)), - ('tstamp', models.DateTimeField(verbose_name='event received at', db_index=True)), - ('args', models.TextField(null=True, verbose_name='Arguments')), - ('kwargs', models.TextField(null=True, verbose_name='Keyword arguments')), - ('eta', models.DateTimeField(null=True, verbose_name='ETA')), - ('expires', models.DateTimeField(null=True, verbose_name='expires')), - ('result', models.TextField(null=True, verbose_name='result')), - ('traceback', models.TextField(null=True, verbose_name='traceback')), - ('runtime', models.FloatField(help_text='in seconds if task succeeded', null=True, verbose_name='execution time')), - ('retries', models.IntegerField(default=0, verbose_name='number of retries')), - ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), - ], - options={ - 'ordering': ['-tstamp'], - 'get_latest_by': 'tstamp', - 'verbose_name': 'task', - 'verbose_name_plural': 'tasks', - }, - bases=(models.Model,), - ), - migrations.CreateModel( - name='WorkerState', - fields=[ - ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), - ('hostname', models.CharField(unique=True, max_length=255, verbose_name='hostname')), - ('last_heartbeat', models.DateTimeField(null=True, verbose_name='last heartbeat', db_index=True)), - ], - options={ - 'ordering': ['-last_heartbeat'], - 'get_latest_by': 'last_heartbeat', - 'verbose_name': 'worker', - 'verbose_name_plural': 'workers', - }, - bases=(models.Model,), - ), - migrations.AddField( - model_name='taskstate', - name='worker', - field=models.ForeignKey(verbose_name='worker', to='djcelery.WorkerState', null=True, on_delete=models.CASCADE), - preserve_default=True, - ), - ] diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/migrations/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/migrations/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/models.py b/thesisenv/lib/python3.6/site-packages/djcelery/models.py deleted file mode 100644 index be90f95..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/models.py +++ /dev/null @@ -1,381 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from datetime import timedelta -from time import time, mktime, gmtime - -from django.core.exceptions import 
MultipleObjectsReturned, ValidationError -from django.db import models -from django.db.models import signals -from django.utils.translation import ugettext_lazy as _ -from django.conf import settings - -from celery import schedules -from celery import states -from celery.events.state import heartbeat_expires - -from . import managers -from .picklefield import PickledObjectField -from .utils import now -from .compat import python_2_unicode_compatible - -ALL_STATES = sorted(states.ALL_STATES) -TASK_STATE_CHOICES = sorted(zip(ALL_STATES, ALL_STATES)) - - -def cronexp(field): - return field and str(field).replace(' ', '') or '*' - - -@python_2_unicode_compatible -class TaskMeta(models.Model): - """Task result/status.""" - task_id = models.CharField(_('task id'), max_length=255, unique=True) - status = models.CharField( - _('state'), - max_length=50, default=states.PENDING, choices=TASK_STATE_CHOICES, - ) - result = PickledObjectField(null=True, default=None, editable=False) - date_done = models.DateTimeField(_('done at'), auto_now=True) - traceback = models.TextField(_('traceback'), blank=True, null=True) - hidden = models.BooleanField(editable=False, default=False, db_index=True) - # TODO compression was enabled by mistake, we need to disable it - # but this is a backwards incompatible change that needs planning. - meta = PickledObjectField( - compress=True, null=True, default=None, editable=False, - ) - - objects = managers.TaskManager() - - class Meta: - verbose_name = _('task state') - verbose_name_plural = _('task states') - db_table = 'celery_taskmeta' - - def to_dict(self): - return {'task_id': self.task_id, - 'status': self.status, - 'result': self.result, - 'date_done': self.date_done, - 'traceback': self.traceback, - 'children': (self.meta or {}).get('children')} - - def __str__(self): - return ''.format(self) - - -@python_2_unicode_compatible -class TaskSetMeta(models.Model): - """TaskSet result""" - taskset_id = models.CharField(_('group id'), max_length=255, unique=True) - result = PickledObjectField() - date_done = models.DateTimeField(_('created at'), auto_now=True) - hidden = models.BooleanField(editable=False, default=False, db_index=True) - - objects = managers.TaskSetManager() - - class Meta: - """Model meta-data.""" - verbose_name = _('saved group result') - verbose_name_plural = _('saved group results') - db_table = 'celery_tasksetmeta' - - def to_dict(self): - return {'taskset_id': self.taskset_id, - 'result': self.result, - 'date_done': self.date_done} - - def __str__(self): - return ''.format(self) - - -PERIOD_CHOICES = (('days', _('Days')), - ('hours', _('Hours')), - ('minutes', _('Minutes')), - ('seconds', _('Seconds')), - ('microseconds', _('Microseconds'))) - - -@python_2_unicode_compatible -class IntervalSchedule(models.Model): - every = models.IntegerField(_('every'), null=False) - period = models.CharField( - _('period'), max_length=24, choices=PERIOD_CHOICES, - ) - - class Meta: - verbose_name = _('interval') - verbose_name_plural = _('intervals') - ordering = ['period', 'every'] - - @property - def schedule(self): - return schedules.schedule(timedelta(**{self.period: self.every})) - - @classmethod - def from_schedule(cls, schedule, period='seconds'): - every = max(schedule.run_every.total_seconds(), 0) - try: - return cls.objects.get(every=every, period=period) - except cls.DoesNotExist: - return cls(every=every, period=period) - except MultipleObjectsReturned: - cls.objects.filter(every=every, period=period).delete() - return cls(every=every, period=period) - 
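-    # Rendered as e.g. "every second" or "every 10 seconds".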
- def __str__(self): - if self.every == 1: - return _('every {0.period_singular}').format(self) - return _('every {0.every:d} {0.period}').format(self) - - @property - def period_singular(self): - return self.period[:-1] - - -@python_2_unicode_compatible -class CrontabSchedule(models.Model): - minute = models.CharField(_('minute'), max_length=64, default='*') - hour = models.CharField(_('hour'), max_length=64, default='*') - day_of_week = models.CharField( - _('day of week'), max_length=64, default='*', - ) - day_of_month = models.CharField( - _('day of month'), max_length=64, default='*', - ) - month_of_year = models.CharField( - _('month of year'), max_length=64, default='*', - ) - - class Meta: - verbose_name = _('crontab') - verbose_name_plural = _('crontabs') - ordering = ['month_of_year', 'day_of_month', - 'day_of_week', 'hour', 'minute'] - - def __str__(self): - return '{0} {1} {2} {3} {4} (m/h/d/dM/MY)'.format( - cronexp(self.minute), - cronexp(self.hour), - cronexp(self.day_of_week), - cronexp(self.day_of_month), - cronexp(self.month_of_year), - ) - - @property - def schedule(self): - return schedules.crontab(minute=self.minute, - hour=self.hour, - day_of_week=self.day_of_week, - day_of_month=self.day_of_month, - month_of_year=self.month_of_year) - - @classmethod - def from_schedule(cls, schedule): - spec = {'minute': schedule._orig_minute, - 'hour': schedule._orig_hour, - 'day_of_week': schedule._orig_day_of_week, - 'day_of_month': schedule._orig_day_of_month, - 'month_of_year': schedule._orig_month_of_year} - try: - return cls.objects.get(**spec) - except cls.DoesNotExist: - return cls(**spec) - except MultipleObjectsReturned: - cls.objects.filter(**spec).delete() - return cls(**spec) - - -class PeriodicTasks(models.Model): - ident = models.SmallIntegerField(default=1, primary_key=True, unique=True) - last_update = models.DateTimeField(null=False) - - objects = managers.ExtendedManager() - - @classmethod - def changed(cls, instance, **kwargs): - if not instance.no_changes: - cls.objects.update_or_create(ident=1, - defaults={'last_update': now()}) - - @classmethod - def last_change(cls): - try: - return cls.objects.get(ident=1).last_update - except cls.DoesNotExist: - pass - - -@python_2_unicode_compatible -class PeriodicTask(models.Model): - name = models.CharField( - _('name'), max_length=200, unique=True, - help_text=_('Useful description'), - ) - task = models.CharField(_('task name'), max_length=200) - interval = models.ForeignKey( - IntervalSchedule, - null=True, blank=True, verbose_name=_('interval'), - on_delete=models.CASCADE, - ) - crontab = models.ForeignKey( - CrontabSchedule, null=True, blank=True, verbose_name=_('crontab'), - on_delete=models.CASCADE, - help_text=_('Use one of interval/crontab'), - ) - args = models.TextField( - _('Arguments'), blank=True, default='[]', - help_text=_('JSON encoded positional arguments'), - ) - kwargs = models.TextField( - _('Keyword arguments'), blank=True, default='{}', - help_text=_('JSON encoded keyword arguments'), - ) - queue = models.CharField( - _('queue'), max_length=200, blank=True, null=True, default=None, - help_text=_('Queue defined in CELERY_QUEUES'), - ) - exchange = models.CharField( - _('exchange'), max_length=200, blank=True, null=True, default=None, - ) - routing_key = models.CharField( - _('routing key'), max_length=200, blank=True, null=True, default=None, - ) - expires = models.DateTimeField( - _('expires'), blank=True, null=True, - ) - enabled = models.BooleanField( - _('enabled'), default=True, - ) - 
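-    # Cleared by save() below whenever the task is disabled.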
last_run_at = models.DateTimeField( - auto_now=False, auto_now_add=False, - editable=False, blank=True, null=True, - ) - total_run_count = models.PositiveIntegerField( - default=0, editable=False, - ) - date_changed = models.DateTimeField(auto_now=True) - description = models.TextField(_('description'), blank=True) - - objects = managers.PeriodicTaskManager() - no_changes = False - - class Meta: - verbose_name = _('periodic task') - verbose_name_plural = _('periodic tasks') - - def validate_unique(self, *args, **kwargs): - super(PeriodicTask, self).validate_unique(*args, **kwargs) - if not self.interval and not self.crontab: - raise ValidationError( - {'interval': ['One of interval or crontab must be set.']}) - if self.interval and self.crontab: - raise ValidationError( - {'crontab': ['Only one of interval or crontab must be set']}) - - def save(self, *args, **kwargs): - self.exchange = self.exchange or None - self.routing_key = self.routing_key or None - self.queue = self.queue or None - if not self.enabled: - self.last_run_at = None - super(PeriodicTask, self).save(*args, **kwargs) - - def __str__(self): - fmt = '{0.name}: {{no schedule}}' - if self.interval: - fmt = '{0.name}: {0.interval}' - if self.crontab: - fmt = '{0.name}: {0.crontab}' - return fmt.format(self) - - @property - def schedule(self): - if self.interval: - return self.interval.schedule - if self.crontab: - return self.crontab.schedule - - -signals.pre_delete.connect(PeriodicTasks.changed, sender=PeriodicTask) -signals.pre_save.connect(PeriodicTasks.changed, sender=PeriodicTask) - - -class WorkerState(models.Model): - hostname = models.CharField(_('hostname'), max_length=255, unique=True) - last_heartbeat = models.DateTimeField(_('last heartbeat'), null=True, - db_index=True) - - objects = managers.ExtendedManager() - - class Meta: - """Model meta-data.""" - verbose_name = _('worker') - verbose_name_plural = _('workers') - get_latest_by = 'last_heartbeat' - ordering = ['-last_heartbeat'] - - def __str__(self): - return self.hostname - - def __repr__(self): - return ''.format(self) - - def is_alive(self): - if self.last_heartbeat: - # Use UTC timestamp if USE_TZ is true, or else use local timestamp - timestamp = mktime(gmtime()) if settings.USE_TZ else time() - return timestamp < heartbeat_expires(self.heartbeat_timestamp) - return False - - @property - def heartbeat_timestamp(self): - return mktime(self.last_heartbeat.timetuple()) - - -@python_2_unicode_compatible -class TaskState(models.Model): - state = models.CharField( - _('state'), max_length=64, choices=TASK_STATE_CHOICES, db_index=True, - ) - task_id = models.CharField(_('UUID'), max_length=36, unique=True) - name = models.CharField( - _('name'), max_length=200, null=True, db_index=True, - ) - tstamp = models.DateTimeField(_('event received at'), db_index=True) - args = models.TextField(_('Arguments'), null=True) - kwargs = models.TextField(_('Keyword arguments'), null=True) - eta = models.DateTimeField(_('ETA'), null=True) - expires = models.DateTimeField(_('expires'), null=True) - result = models.TextField(_('result'), null=True) - traceback = models.TextField(_('traceback'), null=True) - runtime = models.FloatField( - _('execution time'), null=True, - help_text=_('in seconds if task succeeded'), - ) - retries = models.IntegerField(_('number of retries'), default=0) - worker = models.ForeignKey( - WorkerState, null=True, verbose_name=_('worker'), - on_delete=models.CASCADE, - ) - hidden = models.BooleanField(editable=False, default=False, db_index=True) - - 
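-    # Rows flagged hidden are soft-deleted: expire_by_states() marks them
-    # and TaskStateManager.purge() later removes them for good.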
objects = managers.TaskStateManager() - - class Meta: - """Model meta-data.""" - verbose_name = _('task') - verbose_name_plural = _('tasks') - get_latest_by = 'tstamp' - ordering = ['-tstamp'] - - def __str__(self): - name = self.name or 'UNKNOWN' - s = '{0.state:<10} {0.task_id:<36} {1}'.format(self, name) - if self.eta: - s += ' eta:{0.eta}'.format(self) - return s - - def __repr__(self): - return ''.format( - self, self.name or 'UNKNOWN', - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/mon.py b/thesisenv/lib/python3.6/site-packages/djcelery/mon.py deleted file mode 100644 index 860e07a..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/mon.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import os -import sys -import types - -from celery.app.defaults import strtobool -from celery.utils import import_from_cwd - -from djcelery.compat import setenv - -DEFAULT_APPS = ('django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.admin', - 'django.contrib.admindocs', - 'djcelery', - ) - -DEFAULTS = {'ROOT_URLCONF': 'djcelery.monproj.urls', - 'DATABASE_ENGINE': 'sqlite3', - 'DATABASE_NAME': 'djcelerymon.db', - 'DATABASES': {'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': 'djcelerymon.db'}}, - 'BROKER_URL': 'amqp://', - 'SITE_ID': 1, - 'INSTALLED_APPS': DEFAULT_APPS, - 'DEBUG': strtobool(os.environ.get('DJCELERYMON_DEBUG', '0')) - } - - -def default_settings(name='__default_settings__'): - c = type(name, (types.ModuleType, ), DEFAULTS)(name) - c.__dict__.update({'__file__': __file__}) - sys.modules[name] = c - return name - - -def configure(): - from celery import current_app - from celery.loaders.default import DEFAULT_CONFIG_MODULE - from django.conf import settings - - app = current_app - conf = {} - - if not settings.configured: - if 'loader' in app.__dict__ and app.loader.configured: - conf = current_app.loader.conf - else: - os.environ.pop('CELERY_LOADER', None) - settings_module = os.environ.get('CELERY_CONFIG_MODULE', - DEFAULT_CONFIG_MODULE) - try: - import_from_cwd(settings_module) - except ImportError: - settings_module = default_settings() - settings.configure(SETTINGS_MODULE=settings_module, - **dict(DEFAULTS, **conf)) - - -def run_monitor(argv): - from .management.commands import djcelerymon - djcelerymon.Command().run_from_argv([argv[0], 'djcelerymon'] + argv[1:]) - - -def main(argv=sys.argv): - from django.core import management - setenv('CELERY_LOADER', 'default') - configure() - management.call_command('migrate') - run_monitor(argv) - - -if __name__ == '__main__': - main() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/monproj/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/monproj/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py b/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py deleted file mode 100644 index d3d95f6..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from django.conf.urls import include, url -from django.contrib import admin - - -urlpatterns = [ - # Uncomment the admin/doc line below and add 'django.contrib.admindocs' - # to INSTALLED_APPS to enable admin documentation: - url( - r'^doc/', - include('django.contrib.admindocs.urls') - ), - - url(r'', include(admin.site.urls)), -] diff --git 
a/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py b/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py deleted file mode 100644 index a7dc5bd..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py +++ /dev/null @@ -1,128 +0,0 @@ -""" - Based on django-picklefield which is - Copyright (c) 2009-2010 Gintautas Miliauskas - but some improvements including not deepcopying values. - - Provides an implementation of a pickled object field. - Such fields can contain any picklable objects. - - The implementation is taken and adopted from Django snippet #1694 - by Taavi Taijala, - which is in turn based on Django snippet #513 - by Oliver Beattie. - -""" -from __future__ import absolute_import, unicode_literals - -import django - -from base64 import b64encode, b64decode -from zlib import compress, decompress - -from celery.five import with_metaclass -from celery.utils.serialization import pickle -from kombu.utils.encoding import bytes_to_str, str_to_bytes - -from django.db import models - -try: - from django.utils.encoding import force_text -except ImportError: - from django.utils.encoding import force_unicode as force_text # noqa - -DEFAULT_PROTOCOL = 2 - -NO_DECOMPRESS_HEADER = b'\x1e\x00r8d9qwwerwhA@' - - -if django.VERSION >= (1, 8): - BaseField = models.Field -else: - @with_metaclass(models.SubfieldBase, skip_attrs=set([ - 'db_type', - 'get_db_prep_save' - ])) - class BaseField(models.Field): # noqa - pass - - -class PickledObject(str): - pass - - -def maybe_compress(value, do_compress=False): - if do_compress: - return compress(str_to_bytes(value)) - return value - - -def maybe_decompress(value, do_decompress=False): - if do_decompress: - if str_to_bytes(value[:15]) != NO_DECOMPRESS_HEADER: - return decompress(str_to_bytes(value)) - return value - - -def encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL): - return bytes_to_str(b64encode(maybe_compress( - pickle.dumps(value, pickle_protocol), compress_object), - )) - - -def decode(value, compress_object=False): - return pickle.loads(maybe_decompress(b64decode(value), compress_object)) - - -class PickledObjectField(BaseField): - - def __init__(self, compress=False, protocol=DEFAULT_PROTOCOL, - *args, **kwargs): - self.compress = compress - self.protocol = protocol - kwargs.setdefault('editable', False) - super(PickledObjectField, self).__init__(*args, **kwargs) - - def get_default(self): - if self.has_default(): - return self.default() if callable(self.default) else self.default - return super(PickledObjectField, self).get_default() - - def to_python(self, value): - if value is not None: - try: - return decode(value, self.compress) - except Exception: - if isinstance(value, PickledObject): - raise - return value - - def from_db_value(self, value, expression, connection, context): - return self.to_python(value) - - def get_db_prep_value(self, value, **kwargs): - if value is not None and not isinstance(value, PickledObject): - return force_text(encode(value, self.compress, self.protocol)) - return value - - def value_to_string(self, obj): - return self.get_db_prep_value(self._get_val_from_obj(obj)) - - def get_internal_type(self): - return 'TextField' - - def get_db_prep_lookup(self, lookup_type, value, *args, **kwargs): - if lookup_type not in ['exact', 'in', 'isnull']: - raise TypeError( - 'Lookup type {0} is not supported.'.format(lookup_type)) - return super(PickledObjectField, self) \ - .get_db_prep_lookup(*args, **kwargs) - - -try: - from south.modelsinspector import 
add_introspection_rules -except ImportError: - pass -else: - add_introspection_rules( - [], [r'^djcelery\.picklefield\.PickledObjectField'], - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py b/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py deleted file mode 100644 index 29d32b9..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py +++ /dev/null @@ -1,282 +0,0 @@ -from __future__ import absolute_import - -import logging - -from multiprocessing.util import Finalize - -from anyjson import loads, dumps -from celery import current_app -from celery import schedules -from celery.beat import Scheduler, ScheduleEntry -from celery.utils.encoding import safe_str, safe_repr -from celery.utils.log import get_logger - -try: - from celery.utils.timeutils import is_naive -except ImportError: - from celery.utils.time import is_naive - -from django.db import transaction -from django.core.exceptions import ObjectDoesNotExist - -from .db import commit_on_success -from .models import (PeriodicTask, PeriodicTasks, - CrontabSchedule, IntervalSchedule) -from .utils import DATABASE_ERRORS, make_aware -from .compat import itervalues - -# This scheduler must wake up more frequently than the -# regular of 5 minutes because it needs to take external -# changes to the schedule into account. -DEFAULT_MAX_INTERVAL = 5 # seconds - -ADD_ENTRY_ERROR = """\ -Couldn't add entry %r to database schedule: %r. Contents: %r -""" - -logger = get_logger(__name__) -debug, info, error = logger.debug, logger.info, logger.error - - -class ModelEntry(ScheduleEntry): - model_schedules = ((schedules.crontab, CrontabSchedule, 'crontab'), - (schedules.schedule, IntervalSchedule, 'interval')) - save_fields = ['last_run_at', 'total_run_count', 'no_changes'] - - def __init__(self, model): - self.app = current_app._get_current_object() - self.name = model.name - self.task = model.task - try: - self.schedule = model.schedule - except model.DoesNotExist: - logger.error('Schedule was removed from database') - logger.warning('Disabling %s', self.name) - self._disable(model) - try: - self.args = loads(model.args or '[]') - self.kwargs = loads(model.kwargs or '{}') - except ValueError: - logging.error('Failed to serialize arguments for %s.', self.name, - exc_info=1) - logging.warning('Disabling %s', self.name) - self._disable(model) - - self.options = {'queue': model.queue, - 'exchange': model.exchange, - 'routing_key': model.routing_key, - 'expires': model.expires} - self.total_run_count = model.total_run_count - self.model = model - - if not model.last_run_at: - model.last_run_at = self._default_now() - orig = self.last_run_at = model.last_run_at - if not is_naive(self.last_run_at): - self.last_run_at = self.last_run_at.replace(tzinfo=None) - assert orig.hour == self.last_run_at.hour # timezone sanity - - def _disable(self, model): - model.no_changes = True - model.enabled = False - model.save() - - def is_due(self): - if not self.model.enabled: - return False, 5.0 # 5 second delay for re-enable. - return self.schedule.is_due(self.last_run_at) - - def _default_now(self): - return self.app.now() - - def __next__(self): - self.model.last_run_at = self.app.now() - self.model.total_run_count += 1 - self.model.no_changes = True - return self.__class__(self.model) - next = __next__ # for 2to3 - - def save(self): - # Object may not be synchronized, so only - # change the fields we care about. 
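-        # A fresh copy of the row is fetched and only the fields listed
-        # in save_fields are copied onto it, so concurrent edits to the
-        # other columns survive.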
- obj = type(self.model)._default_manager.get(pk=self.model.pk) - for field in self.save_fields: - setattr(obj, field, getattr(self.model, field)) - obj.last_run_at = make_aware(obj.last_run_at) - obj.save() - - @classmethod - def to_model_schedule(cls, schedule): - for schedule_type, model_type, model_field in cls.model_schedules: - schedule = schedules.maybe_schedule(schedule) - if isinstance(schedule, schedule_type): - model_schedule = model_type.from_schedule(schedule) - model_schedule.save() - return model_schedule, model_field - raise ValueError( - 'Cannot convert schedule type {0!r} to model'.format(schedule)) - - @classmethod - def from_entry(cls, name, skip_fields=('relative', 'options'), **entry): - options = entry.get('options') or {} - fields = dict(entry) - for skip_field in skip_fields: - fields.pop(skip_field, None) - schedule = fields.pop('schedule') - model_schedule, model_field = cls.to_model_schedule(schedule) - - # reset schedule - for t in cls.model_schedules: - fields[t[2]] = None - - fields[model_field] = model_schedule - fields['args'] = dumps(fields.get('args') or []) - fields['kwargs'] = dumps(fields.get('kwargs') or {}) - fields['queue'] = options.get('queue') - fields['exchange'] = options.get('exchange') - fields['routing_key'] = options.get('routing_key') - obj, _ = PeriodicTask._default_manager.update_or_create( - name=name, defaults=fields, - ) - return cls(obj) - - def __repr__(self): - return ''.format( - safe_str(self.name), self.task, safe_repr(self.args), - safe_repr(self.kwargs), self.schedule, - ) - - -class DatabaseScheduler(Scheduler): - Entry = ModelEntry - Model = PeriodicTask - Changes = PeriodicTasks - _schedule = None - _last_timestamp = None - _initial_read = False - - def __init__(self, *args, **kwargs): - self._dirty = set() - self._finalize = Finalize(self, self.sync, exitpriority=5) - Scheduler.__init__(self, *args, **kwargs) - self.max_interval = ( - kwargs.get('max_interval') or - self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or - DEFAULT_MAX_INTERVAL) - - def setup_schedule(self): - self.install_default_entries(self.schedule) - self.update_from_dict(self.app.conf.CELERYBEAT_SCHEDULE) - - def all_as_schedule(self): - debug('DatabaseScheduler: Fetching database schedule') - s = {} - for model in self.Model.objects.enabled(): - try: - s[model.name] = self.Entry(model) - except ValueError: - pass - return s - - def schedule_changed(self): - try: - # If MySQL is running with transaction isolation level - # REPEATABLE-READ (default), then we won't see changes done by - # other transactions until the current transaction is - # committed (Issue #41). - try: - transaction.commit() - except transaction.TransactionManagementError: - pass # not in transaction management. - - last, ts = self._last_timestamp, self.Changes.last_change() - except DATABASE_ERRORS as exc: - # Close the connection when it is broken - transaction.get_connection().close_if_unusable_or_obsolete() - error('Database gave error: %r', exc, exc_info=1) - return False - try: - if ts and ts > (last if last else ts): - return True - finally: - self._last_timestamp = ts - return False - - def reserve(self, entry): - new_entry = Scheduler.reserve(self, entry) - # Need to store entry by name, because the entry may change - # in the mean time. 
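-        # (sync() later resolves the name via self.schedule[name] and saves.)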
- self._dirty.add(new_entry.name) - return new_entry - - def sync(self): - info('Writing entries (%s)...', len(self._dirty)) - _tried = set() - try: - with commit_on_success(): - while self._dirty: - try: - name = self._dirty.pop() - _tried.add(name) - self.schedule[name].save() - except (KeyError, ObjectDoesNotExist): - pass - except DATABASE_ERRORS as exc: - # retry later - self._dirty |= _tried - error('Database error while sync: %r', exc, exc_info=1) - - def update_from_dict(self, dict_): - s = {} - for name, entry in dict_.items(): - try: - s[name] = self.Entry.from_entry(name, **entry) - except Exception as exc: - error(ADD_ENTRY_ERROR, name, exc, entry) - self.schedule.update(s) - - def install_default_entries(self, data): - entries = {} - if self.app.conf.CELERY_TASK_RESULT_EXPIRES: - entries.setdefault( - 'celery.backend_cleanup', { - 'task': 'celery.backend_cleanup', - 'schedule': schedules.crontab('0', '4', '*'), - 'options': {'expires': 12 * 3600}, - }, - ) - self.update_from_dict(entries) - - @property - def schedule(self): - update = False - if not self._initial_read: - debug('DatabaseScheduler: intial read') - update = True - self._initial_read = True - elif self.schedule_changed(): - info('DatabaseScheduler: Schedule changed.') - update = True - - if update: - self.sync() - self._schedule = self.all_as_schedule() - if logger.isEnabledFor(logging.DEBUG): - debug('Current schedule:\n%s', '\n'.join( - repr(entry) for entry in itervalues(self._schedule)), - ) - return self._schedule - - @classmethod - def create_or_update_task(cls, name, **schedule_dict): - if 'schedule' not in schedule_dict: - try: - schedule_dict['schedule'] = \ - PeriodicTask._default_manager.get(name=name).schedule - except PeriodicTask.DoesNotExist: - pass - cls.Entry.from_entry(name, **schedule_dict) - - @classmethod - def delete_task(cls, name): - PeriodicTask._default_manager.get(name=name).delete() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py b/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py deleted file mode 100644 index 39d8af8..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from collections import defaultdict -from datetime import timedelta - -from django.conf import settings - -from celery import states -from celery.events.state import Task -from celery.events.snapshot import Polaroid -from celery.five import monotonic -from celery.utils.log import get_logger - -try: - from celery.utils.timeutils import maybe_iso8601 -except ImportError: - from celery.utils.time import maybe_iso8601 - -from .models import WorkerState, TaskState -from .utils import fromtimestamp, correct_awareness - -WORKER_UPDATE_FREQ = 60 # limit worker timestamp write freq. -SUCCESS_STATES = frozenset([states.SUCCESS]) - -# Expiry can be timedelta or None for never expire. 
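-# The defaults keep successful results for one day, errors for three
-# days and pending/unready states for five; each can be overridden with
-# the matching CELERYCAM_EXPIRE_* setting.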
-EXPIRE_SUCCESS = getattr(settings, 'CELERYCAM_EXPIRE_SUCCESS', - timedelta(days=1)) -EXPIRE_ERROR = getattr(settings, 'CELERYCAM_EXPIRE_ERROR', - timedelta(days=3)) -EXPIRE_PENDING = getattr(settings, 'CELERYCAM_EXPIRE_PENDING', - timedelta(days=5)) -NOT_SAVED_ATTRIBUTES = frozenset(['name', 'args', 'kwargs', 'eta']) - -logger = get_logger(__name__) -debug = logger.debug - - -class Camera(Polaroid): - TaskState = TaskState - WorkerState = WorkerState - - clear_after = True - worker_update_freq = WORKER_UPDATE_FREQ - expire_states = { - SUCCESS_STATES: EXPIRE_SUCCESS, - states.EXCEPTION_STATES: EXPIRE_ERROR, - states.UNREADY_STATES: EXPIRE_PENDING, - } - - def __init__(self, *args, **kwargs): - super(Camera, self).__init__(*args, **kwargs) - self._last_worker_write = defaultdict(lambda: (None, None)) - - def get_heartbeat(self, worker): - try: - heartbeat = worker.heartbeats[-1] - except IndexError: - return - return fromtimestamp(heartbeat) - - def handle_worker(self, hostname_worker): - (hostname, worker) = hostname_worker - last_write, obj = self._last_worker_write[hostname] - if not last_write or \ - monotonic() - last_write > self.worker_update_freq: - obj, _ = self.WorkerState.objects.update_or_create( - hostname=hostname, - defaults={'last_heartbeat': self.get_heartbeat(worker)}, - ) - self._last_worker_write[hostname] = (monotonic(), obj) - return obj - - def handle_task(self, uuid_task, worker=None): - """Handle snapshotted event.""" - uuid, task = uuid_task - if task.worker and task.worker.hostname: - worker = self.handle_worker( - (task.worker.hostname, task.worker), - ) - - defaults = { - 'name': task.name, - 'args': task.args, - 'kwargs': task.kwargs, - 'eta': correct_awareness(maybe_iso8601(task.eta)), - 'expires': correct_awareness(maybe_iso8601(task.expires)), - 'state': task.state, - 'tstamp': fromtimestamp(task.timestamp), - 'result': task.result or task.exception, - 'traceback': task.traceback, - 'runtime': task.runtime, - 'worker': worker - } - # Some fields are only stored in the RECEIVED event, - # so we should remove these from default values, - # so that they are not overwritten by subsequent states. 
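-        # Only attributes that are still None get dropped, so values
-        # captured by the RECEIVED event are never overwritten with
-        # empty ones.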
- [defaults.pop(attr, None) for attr in NOT_SAVED_ATTRIBUTES - if defaults[attr] is None] - return self.update_task(task.state, - task_id=uuid, defaults=defaults) - - def update_task(self, state, **kwargs): - objects = self.TaskState.objects - defaults = kwargs.pop('defaults', None) or {} - if not defaults.get('name'): - return - obj, created = objects.get_or_create(defaults=defaults, **kwargs) - if created: - return obj - else: - if states.state(state) < states.state(obj.state): - keep = Task.merge_rules[states.RECEIVED] - defaults = dict( - (k, v) for k, v in defaults.items() - if k not in keep - ) - - for k, v in defaults.items(): - setattr(obj, k, v) - obj.save() - - return obj - - def on_shutter(self, state, commit_every=100): - - def _handle_tasks(): - for i, task in enumerate(state.tasks.items()): - self.handle_task(task) - - for worker in state.workers.items(): - self.handle_worker(worker) - _handle_tasks() - - def on_cleanup(self): - expired = (self.TaskState.objects.expire_by_states(states, expires) - for states, expires in self.expire_states.items()) - dirty = sum(item for item in expired if item is not None) - if dirty: - debug('Cleanup: Marked %s objects as dirty.', dirty) - self.TaskState.objects.purge() - debug('Cleanup: %s objects purged.', dirty) - return dirty - return 0 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css b/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css deleted file mode 100644 index b4f4c6a..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css +++ /dev/null @@ -1,4 +0,0 @@ -.form-row.field-traceback p { - font-family: monospace; - white-space: pre; -} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html b/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html deleted file mode 100644 index 20b269f..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends "admin/change_list.html" %} -{% load i18n %} - -{% block breadcrumbs %} - - {% if wrong_scheduler %} -
-  <ul class="messagelist">
-    <li class="warning">
-      Periodic tasks won't be dispatched unless you set the
-      CELERYBEAT_SCHEDULER setting to
-      djcelery.schedulers.DatabaseScheduler,
-      or specify it using the -S option to celerybeat
-    </li>
-  </ul>
- {% endif %} -{% endblock %} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html b/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html deleted file mode 100644 index 6152b76..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html +++ /dev/null @@ -1,25 +0,0 @@ -{% extends "admin/base_site.html" %} -{% load i18n %} - -{% block breadcrumbs %} - -{% endblock %} - -{% block content %} -
<form action="" method="post">{% csrf_token %}
-  {% for obj in queryset %}
-  <input type="hidden" name="_selected_action" value="{{ obj.pk }}" />
-  {% endfor %}
-  <input type="hidden" name="action" value="rate_limit" />
-  <input type="hidden" name="post" value="yes" />
-  <input type="submit" value="Apply" />
-</form>
-{% endblock %} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py deleted file mode 100644 index 4969b5c..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py +++ /dev/null @@ -1,6 +0,0 @@ -# coding: utf-8 - -try: - from unittest.mock import patch -except ImportError: - from mock import patch # noqa diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py deleted file mode 100644 index 01a99fb..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from django.test import Client -from django.core.handlers.wsgi import WSGIRequest -from django.core.handlers.base import BaseHandler - -from celery.utils.compat import WhateverIO - -from djcelery.compat import unicode - - -class RequestFactory(Client): - """Class that lets you create mock Request objects for use in testing. - - Usage: - - rf = RequestFactory() - get_request = rf.get('/hello/') - post_request = rf.post('/submit/', {'foo': 'bar'}) - - This class re-uses the django.test.client.Client interface, docs here: - http://www.djangoproject.com/documentation/testing/#the-test-client - - Once you have a request object you can pass it to any view function, - just as if that view had been hooked up using a URLconf. - - """ - - def request(self, **request): - """Similar to parent class, but returns the request object as - soon as it has created it.""" - environ = { - 'HTTP_COOKIE': unicode(self.cookies), - 'HTTP_USER_AGENT': 'Django UnitTest Client 1.0', - 'REMOTE_ADDR': '127.0.0.1', - 'PATH_INFO': '/', - 'QUERY_STRING': '', - 'REQUEST_METHOD': 'GET', - 'SCRIPT_NAME': '', - 'SERVER_NAME': 'testserver', - 'SERVER_PORT': 80, - 'SERVER_PROTOCOL': 'HTTP/1.1', - 'wsgi.input': WhateverIO(), - } - - environ.update(self.defaults) - environ.update(request) - return WSGIRequest(environ) - - -class MockRequest(object): - - def __init__(self): - handler = BaseHandler() - handler.load_middleware() - self.request_factory = RequestFactory() - self.middleware = handler._request_middleware - - def _make_request(self, request_method, *args, **kwargs): - request_method_handler = getattr(self.request_factory, request_method) - request = request_method_handler(*args, **kwargs) - [middleware_processor(request) - for middleware_processor in self.middleware] - return request - - def get(self, *args, **kwargs): - return self._make_request('get', *args, **kwargs) - - def post(self, *args, **kwargs): - return self._make_request('post', *args, **kwargs) - - def put(self, *args, **kwargs): - return self._make_request('put', *args, **kwargs) - - def delete(self, *args, **kwargs): - return self._make_request('delete', *args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py deleted file mode 100644 index d4238ee..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import unicode_literals - -from django.contrib import admin -from django.test import RequestFactory, TestCase - -from djcelery.admin import 
PeriodicTaskAdmin -from djcelery.models import ( - PeriodicTask, IntervalSchedule, PERIOD_CHOICES, PeriodicTasks -) - - -class MockRequest(object): - pass - - -request = MockRequest() - -site = admin.AdminSite() - - -class TestPeriodicTaskAdmin(TestCase): - @classmethod - def setUpTestData(cls): - cls.interval = IntervalSchedule.objects.create( - every=1, period=PERIOD_CHOICES[0][0]) - - cls.request_factory = RequestFactory() - - cls.pt_admin = PeriodicTaskAdmin(PeriodicTask, site) - - def test_specified_ordering(self): - """ - Ordering should be by ('-enabled', 'name') - """ - PeriodicTask.objects.bulk_create([ - PeriodicTask(name='Bohemian Rhapsody', task='bohemian_rhapsody', - interval=self.interval, enabled=True), - PeriodicTask(name='Somebody to Love', task='somebody_to_love', - interval=self.interval, enabled=False), - PeriodicTask(name='Tie Your Mother Down', - task='tie_your_mother_down', - interval=self.interval, enabled=False), - PeriodicTask(name='Under Pressure', task='under_pressure', - interval=self.interval, enabled=True), - ]) - names = [b.name for b in self.pt_admin.get_queryset(request)] - self.assertListEqual(['Bohemian Rhapsody', 'Under Pressure', - 'Somebody to Love', 'Tie Your Mother Down'], - names) - - def test_enable_tasks_should_enable_disabled_periodic_tasks(self): - """ - enable_tasks action should enable selected periodic tasks - """ - PeriodicTask.objects.create(name='Killer Queen', task='killer_queen', - interval=self.interval, enabled=False), - queryset = PeriodicTask.objects.filter(pk=1) - last_update = PeriodicTasks.objects.get(ident=1).last_update - self.pt_admin.enable_tasks(request, queryset) - new_last_update = PeriodicTasks.objects.get(ident=1).last_update - self.assertTrue(PeriodicTask.objects.get(pk=1).enabled) - self.assertNotEqual(last_update, new_last_update) - - def test_disable_tasks_should_disable_enabled_periodic_tasks(self): - """ - disable_tasks action should disable selected periodic tasks - """ - PeriodicTask.objects.create(name='Killer Queen', task='killer_queen', - interval=self.interval, enabled=True), - queryset = PeriodicTask.objects.filter(pk=1) - self.pt_admin.disable_tasks(request, queryset) - self.assertFalse(PeriodicTask.objects.get(pk=1).enabled) - - def test_for_valid_search_fields(self): - """ - Valid search fields should be ('name', 'task') - """ - search_fields = self.pt_admin.search_fields - self.assertEqual(search_fields, ('name', 'task')) - - for fieldname in search_fields: - query = '%s__icontains' % fieldname - kwargs = {query: 'Queen'} - # We have no content, so the number of results if we search on - # something should be zero. 
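-            # A FieldError here would mean the declared search field
-            # is invalid.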
- self.assertEquals(PeriodicTask.objects.filter(**kwargs).count(), 0) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py deleted file mode 100644 index 7bda97d..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py +++ /dev/null @@ -1,115 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import sys - -from datetime import timedelta - -from billiard.einfo import ExceptionInfo - -from celery import result -from celery import states -from celery.utils import gen_unique_id - -from djcelery.app import app -from djcelery.backends.cache import CacheBackend -from djcelery.tests.utils import unittest - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class test_CacheBackend(unittest.TestCase): - - def test_mark_as_done(self): - cb = CacheBackend(app=app) - - tid = gen_unique_id() - - self.assertEqual(cb.get_status(tid), states.PENDING) - self.assertIsNone(cb.get_result(tid)) - - cb.mark_as_done(tid, 42) - self.assertEqual(cb.get_status(tid), states.SUCCESS) - self.assertEqual(cb.get_result(tid), 42) - self.assertTrue(cb.get_result(tid), 42) - - def test_forget(self): - b = CacheBackend(app=app) - tid = gen_unique_id() - b.mark_as_done(tid, {'foo': 'bar'}) - self.assertEqual(b.get_result(tid).get('foo'), 'bar') - b.forget(tid) - self.assertNotIn(tid, b._cache) - self.assertIsNone(b.get_result(tid)) - - def test_save_restore_delete_group(self): - backend = CacheBackend(app=app) - group_id = gen_unique_id() - subtask_ids = [gen_unique_id() for i in range(10)] - subtasks = list(map(result.AsyncResult, subtask_ids)) - res = result.GroupResult(group_id, subtasks) - res.save(backend=backend) - saved = result.GroupResult.restore(group_id, backend=backend) - self.assertListEqual(saved.subtasks, subtasks) - self.assertEqual(saved.id, group_id) - saved.delete(backend=backend) - self.assertIsNone(result.GroupResult.restore(group_id, - backend=backend)) - - def test_is_pickled(self): - cb = CacheBackend(app=app) - - tid2 = gen_unique_id() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - cb.mark_as_done(tid2, result) - # is serialized properly. 
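-        # The nested SomeClass instance must survive the pickle round-trip.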
- rindb = cb.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_failure(self): - cb = CacheBackend(app=app) - - einfo = None - tid3 = gen_unique_id() - try: - raise KeyError('foo') - except KeyError as exception: - einfo = ExceptionInfo(sys.exc_info()) - cb.mark_as_failure(tid3, exception, traceback=einfo.traceback) - self.assertEqual(cb.get_status(tid3), states.FAILURE) - self.assertIsInstance(cb.get_result(tid3), KeyError) - self.assertEqual(cb.get_traceback(tid3), einfo.traceback) - - def test_process_cleanup(self): - cb = CacheBackend(app=app) - cb.process_cleanup() - - def test_set_expires(self): - cb1 = CacheBackend(app=app, expires=timedelta(seconds=16)) - self.assertEqual(cb1.expires, 16) - cb2 = CacheBackend(app=app, expires=32) - self.assertEqual(cb2.expires, 32) - - -class test_custom_CacheBackend(unittest.TestCase): - - def test_custom_cache_backend(self): - from celery import current_app - prev_backend = current_app.conf.CELERY_CACHE_BACKEND - prev_module = sys.modules['djcelery.backends.cache'] - - current_app.conf.CELERY_CACHE_BACKEND = 'dummy' - sys.modules.pop('djcelery.backends.cache') - try: - from djcelery.backends.cache import cache - from django.core.cache import cache as django_cache - self.assertEqual(cache.__class__.__module__, - 'django.core.cache.backends.dummy') - self.assertIsNot(cache, django_cache) - finally: - current_app.conf.CELERY_CACHE_BACKEND = prev_backend - sys.modules['djcelery.backends.cache'] = prev_module diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py deleted file mode 100644 index cf2591b..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py +++ /dev/null @@ -1,105 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -import celery - -from datetime import timedelta - -from celery import current_app -from celery import states -from celery.result import AsyncResult -from celery.task import PeriodicTask -from celery.utils import gen_unique_id - -from djcelery.app import app -from djcelery.backends.database import DatabaseBackend -from djcelery.utils import now -from djcelery.tests.utils import unittest - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class MyPeriodicTask(PeriodicTask): - name = 'c.u.my-periodic-task-244' - run_every = timedelta(seconds=1) - - def run(self, **kwargs): - return 42 - - -class TestDatabaseBackend(unittest.TestCase): - - def test_backend(self): - b = DatabaseBackend(app=app) - tid = gen_unique_id() - - self.assertEqual(b.get_status(tid), states.PENDING) - self.assertIsNone(b.get_result(tid)) - - b.mark_as_done(tid, 42) - self.assertEqual(b.get_status(tid), states.SUCCESS) - self.assertEqual(b.get_result(tid), 42) - - tid2 = gen_unique_id() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - b.mark_as_done(tid2, result) - # is serialized properly. 
- rindb = b.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - tid3 = gen_unique_id() - try: - raise KeyError('foo') - except KeyError as exception: - b.mark_as_failure(tid3, exception) - - self.assertEqual(b.get_status(tid3), states.FAILURE) - self.assertIsInstance(b.get_result(tid3), KeyError) - - def test_forget(self): - b = DatabaseBackend(app=app) - tid = gen_unique_id() - b.mark_as_done(tid, {'foo': 'bar'}) - x = AsyncResult(tid) - self.assertEqual(x.result.get('foo'), 'bar') - x.forget() - if celery.VERSION[0:3] == (3, 1, 10): - # bug in 3.1.10 means result did not clear cache after forget. - x._cache = None - self.assertIsNone(x.result) - - def test_group_store(self): - b = DatabaseBackend(app=app) - tid = gen_unique_id() - - self.assertIsNone(b.restore_group(tid)) - - result = {'foo': 'baz', 'bar': SomeClass(12345)} - b.save_group(tid, result) - rindb = b.restore_group(tid) - self.assertIsNotNone(rindb) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - b.delete_group(tid) - self.assertIsNone(b.restore_group(tid)) - - def test_cleanup(self): - b = DatabaseBackend(app=app) - b.TaskModel._default_manager.all().delete() - ids = [gen_unique_id() for _ in range(3)] - for i, res in enumerate((16, 32, 64)): - b.mark_as_done(ids[i], res) - - self.assertEqual(b.TaskModel._default_manager.count(), 3) - - then = now() - current_app.conf.CELERY_TASK_RESULT_EXPIRES * 2 - # Have to avoid save() because it applies the auto_now=True. - b.TaskModel._default_manager.filter(task_id__in=ids[:-1]) \ - .update(date_done=then) - - b.cleanup() - self.assertEqual(b.TaskModel._default_manager.count(), 1) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py deleted file mode 100644 index 2be01f9..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding: utf-8 -from django import VERSION -from django.core.management import execute_from_command_line - -from ._compat import patch - - -CELERYD_COMMAND = 'djcelery.management.commands.celeryd.Command.handle' - - -def test_celeryd_command(): - if VERSION >= (1, 10): - traceback = False - else: - traceback = None - with patch(CELERYD_COMMAND) as handle: - execute_from_command_line(['manage.py', 'celeryd', '--hostname=test', - '--loglevel=info']) - handle.assert_called_with( - autoreload=None, autoscale=None, beat=None, broker=None, - concurrency=0, detach=None, exclude_queues=[], executable=None, - gid=None, heartbeat_interval=None, hostname="test", include=[], - logfile=None, loglevel='info', max_tasks_per_child=None, - no_color=False, no_execv=False, optimization=None, pidfile=None, - pool_cls='prefork', purge=False, pythonpath=None, queues=[], - quiet=None, schedule_filename='celerybeat-schedule', - scheduler_cls=None, send_events=False, settings=None, - state_db=None, task_soft_time_limit=None, - task_time_limit=None, traceback=traceback, uid=None, umask=None, - verbosity=1, without_gossip=False, without_heartbeat=False, - without_mingle=False, working_directory=None - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py deleted file mode 100644 index 22ebb4e..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py +++ /dev/null @@ -1,35 +0,0 @@ -from 
__future__ import absolute_import, unicode_literals - -import warnings - -from django.conf import settings - -from celery.registry import tasks - -from djcelery.loaders import autodiscover -from djcelery.tests.utils import unittest - - -class TestDiscovery(unittest.TestCase): - - def assertDiscovery(self): - apps = autodiscover() - self.assertTrue(apps) - self.assertIn('c.unittest.SomeAppTask', tasks) - self.assertEqual(tasks['c.unittest.SomeAppTask'].run(), 42) - - def test_discovery(self): - if 'someapp' in settings.INSTALLED_APPS: - self.assertDiscovery() - - def test_discovery_with_broken(self): - warnings.resetwarnings() - if 'someapp' in settings.INSTALLED_APPS: - installed_apps = list(settings.INSTALLED_APPS) - settings.INSTALLED_APPS = installed_apps + ['xxxnot.aexist'] - try: - with warnings.catch_warnings(record=True) as log: - autodiscover() - self.assertTrue(log) - finally: - settings.INSTALLED_APPS = installed_apps diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py deleted file mode 100644 index 2170e9b..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py +++ /dev/null @@ -1,45 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from celery import loaders - -from djcelery import loaders as djloaders -from djcelery.app import app -from djcelery.tests.utils import unittest - - -class TestDjangoLoader(unittest.TestCase): - - def setUp(self): - self.loader = djloaders.DjangoLoader(app=app) - - def test_get_loader_cls(self): - - self.assertEqual(loaders.get_loader_cls('django'), - self.loader.__class__) - # Execute cached branch. - self.assertEqual(loaders.get_loader_cls('django'), - self.loader.__class__) - - def test_on_worker_init(self): - from django.conf import settings - old_imports = getattr(settings, 'CELERY_IMPORTS', ()) - settings.CELERY_IMPORTS = ('xxx.does.not.exist', ) - try: - self.assertRaises(ImportError, self.loader.import_default_modules) - finally: - settings.CELERY_IMPORTS = old_imports - - def test_race_protection(self): - djloaders._RACE_PROTECTION = True - try: - self.assertFalse(self.loader.on_worker_init()) - finally: - djloaders._RACE_PROTECTION = False - - def test_find_related_module_no_path(self): - self.assertFalse(djloaders.find_related_module('sys', 'tasks')) - - def test_find_related_module_no_related(self): - self.assertFalse( - djloaders.find_related_module('someapp', 'frobulators'), - ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py deleted file mode 100644 index 9ee2575..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py +++ /dev/null @@ -1,102 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from datetime import datetime, timedelta - -from celery import states -from celery.utils import gen_unique_id - -from djcelery import celery -from djcelery.models import TaskMeta, TaskSetMeta -from djcelery.utils import now -from djcelery.tests.utils import unittest -from djcelery.compat import unicode - - -class TestModels(unittest.TestCase): - - def createTaskMeta(self): - id = gen_unique_id() - taskmeta, created = TaskMeta.objects.get_or_create(task_id=id) - return taskmeta - - def createTaskSetMeta(self): - id = gen_unique_id() - tasksetmeta, created = TaskSetMeta.objects.get_or_create(taskset_id=id) - return tasksetmeta - - def test_taskmeta(self): - m1 
= self.createTaskMeta()
-        m2 = self.createTaskMeta()
-        m3 = self.createTaskMeta()
-        self.assertTrue(unicode(m1).startswith('<Task:'))
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_views.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_views.py
deleted file mode 100644
--- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_views.py
+++ /dev/null
-class MyError(Exception):
-
-    def __repr__(self):
-        return '<MyError: {0.args[0]}>'.format(self)
-
-
-class MyRetryTaskError(MyError):
-    pass
-
-
-task_is_successful = partial(reversestar, 'celery-is_task_successful')
-task_status = partial(reversestar, 'celery-task_status')
-task_apply = partial(reverse, 'celery-apply')
-registered_tasks = partial(reverse, 'celery-tasks')
-scratch = {}
-
-
-@task()
-def mytask(x, y):
-    ret = scratch['result'] = int(x) * int(y)
-    return ret
-
-
-def create_exception(name, base=Exception):
-    return type(name, (base, ), {})
-
-
-def catch_exception(exception):
-    try:
-        raise exception
-    except exception.__class__ as exc:
-        exc = current_app.backend.prepare_exception(exc)
-        return exc, ExceptionInfo(sys.exc_info()).traceback
-
-
-class ViewTestCase(DjangoTestCase):
-
-    def assertJSONEqual(self, json, py):
-        json = isinstance(json, HttpResponse) and json.content or json
-        try:
-            self.assertEqual(deserialize(json.decode('utf-8')), py)
-        except TypeError as exc:
-            raise TypeError('{0}: {1}'.format(exc, json))
-
-    def assertIn(self, expected, source, *args):
-        try:
-            DjangoTestCase.assertIn(self, expected, source, *args)
-        except AttributeError:
-            self.assertTrue(expected in source)
-
-    def assertDictContainsSubset(self, subset, dictionary, *args):
-        for key, value in subset.items():
-            self.assertIn(key, dictionary)
-            self.assertEqual(dictionary[key], value)
-
-
-class test_task_apply(ViewTestCase):
-
-    def test_apply(self):
-        current_app.conf.CELERY_ALWAYS_EAGER = True
-        try:
-            self.client.get(
-                task_apply(kwargs={'task_name': mytask.name}) + '?x=4&y=4',
-            )
-            self.assertEqual(scratch['result'], 16)
-        finally:
-            current_app.conf.CELERY_ALWAYS_EAGER = False
-
-    def test_apply_raises_404_on_unregistered_task(self):
-        current_app.conf.CELERY_ALWAYS_EAGER = True
-        try:
-            name = 'xxx.does.not.exist'
-            action = partial(
-                self.client.get,
-                task_apply(kwargs={'task_name': name}) + '?x=4&y=4',
-            )
-            try:
-                res = action()
-            except TemplateDoesNotExist:
-                pass  # pre Django 1.5
-            else:
-                self.assertEqual(res.status_code, 404)
-        finally:
-            current_app.conf.CELERY_ALWAYS_EAGER = False
-
-
-class test_registered_tasks(ViewTestCase):
-
-    def test_list_registered_tasks(self):
-        json = self.client.get(registered_tasks())
-        tasks = deserialize(json.content.decode('utf-8'))
-        self.assertIn('celery.backend_cleanup', tasks['regular'])
-
-
-class test_webhook_task(ViewTestCase):
-
-    def test_successful_request(self):
-
-        @task_webhook
-        def add_webhook(request):
-            x = int(request.GET['x'])
-            y = int(request.GET['y'])
-            return x + y
-
-        request = MockRequest().get('/tasks/add', dict(x=10, y=10))
-        response = add_webhook(request)
-        self.assertDictContainsSubset(
-            {'status': 'success', 'retval': 20},
-            deserialize(response.content.decode('utf-8')))
-
-    def test_failed_request(self):
-
-        @task_webhook
-        def error_webhook(request):
-            x = int(request.GET['x'])
-            y = int(request.GET['y'])
-            raise MyError(x + y)
-
-        request = MockRequest().get('/tasks/error', dict(x=10, y=10))
-        response = error_webhook(request)
-        self.assertDictContainsSubset(
-            {'status': 'failure',
-             'reason': '<MyError: 20>'},
-            deserialize(response.content.decode('utf-8')))
-
-
-class test_task_status(ViewTestCase):
-
-    def assertStatusForIs(self, status, res, traceback=None):
-        uuid = gen_unique_id()
-        current_app.backend.store_result(uuid, res, status,
-                                         traceback=traceback)
-        json = self.client.get(task_status(task_id=uuid))
-        expect = dict(id=uuid, status=status, result=res)
-        if 
status in current_app.backend.EXCEPTION_STATES: - instore = current_app.backend.get_result(uuid) - self.assertEqual(str(instore.args[0]), str(res.args[0])) - expect['result'] = repr(res) - expect['exc'] = get_full_cls_name(res.__class__) - expect['traceback'] = traceback - - self.assertJSONEqual(json, dict(task=expect)) - - def test_success(self): - self.assertStatusForIs(states.SUCCESS, 'The quick brown fox') - - def test_failure(self): - exc, tb = catch_exception(MyError('foo')) - self.assertStatusForIs(states.FAILURE, exc, tb) - - def test_retry(self): - oexc, _ = catch_exception(MyError('Resource not available')) - exc, tb = catch_exception(MyRetryTaskError(str(oexc), oexc)) - self.assertStatusForIs(states.RETRY, exc, tb) - - -class test_task_is_successful(ViewTestCase): - - def assertStatusForIs(self, status, outcome, result=None): - uuid = gen_unique_id() - result = result or gen_unique_id() - current_app.backend.store_result(uuid, result, status) - json = self.client.get(task_is_successful(task_id=uuid)) - self.assertJSONEqual(json, {'task': {'id': uuid, - 'executed': outcome}}) - - def test_success(self): - self.assertStatusForIs(states.SUCCESS, True) - - def test_pending(self): - self.assertStatusForIs(states.PENDING, False) - - def test_failure(self): - self.assertStatusForIs(states.FAILURE, False, KeyError('foo')) - - def test_retry(self): - self.assertStatusForIs(states.RETRY, False, KeyError('foo')) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py deleted file mode 100644 index c625f5a..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, unicode_literals - -from django.core import cache - -from celery.utils import gen_unique_id -from celery.task import task as task_dec - -from celery.tests.worker.test_request import jail - -from djcelery.app import app -from djcelery.tests.utils import unittest - - -@task_dec() -def mytask(i): - return i ** i - - -@task_dec() -def get_db_connection(i): - from django.db import connection - return id(connection) - - -get_db_connection.ignore_result = True - - -class TestJail(unittest.TestCase): - - def test_django_db_connection_is_closed(self): - from django.db import connection - connection._was_closed = False - old_connection_close = connection.close - - def monkeypatched_connection_close(*args, **kwargs): - connection._was_closed = True - return old_connection_close(*args, **kwargs) - - connection.close = monkeypatched_connection_close - try: - jail(app, gen_unique_id(), get_db_connection.name, [2], {}) - self.assertTrue(connection._was_closed) - finally: - connection.close = old_connection_close - - def test_django_cache_connection_is_closed(self): - old_cache_close = getattr(cache.cache, 'close', None) - cache._was_closed = False - old_cache_parse_backend = getattr(cache, 'parse_backend_uri', None) - if old_cache_parse_backend: # checks to make sure attr exists - delattr(cache, 'parse_backend_uri') - - def monkeypatched_cache_close(*args, **kwargs): - cache._was_closed = True - - cache.cache.close = monkeypatched_cache_close - - jail(app, gen_unique_id(), mytask.name, [4], {}) - self.assertTrue(cache._was_closed) - cache.cache.close = old_cache_close - if old_cache_parse_backend: - cache.parse_backend_uri = old_cache_parse_backend - - def test_django_cache_connection_is_closed_django_1_1(self): - 
old_cache_close = getattr(cache.cache, 'close', None)
-        cache._was_closed = False
-        old_cache_parse_backend = getattr(cache, 'parse_backend_uri', None)
-        cache.parse_backend_uri = lambda uri: ['libmemcached', '1', '2']
-
-        def monkeypatched_cache_close(*args, **kwargs):
-            cache._was_closed = True
-
-        cache.cache.close = monkeypatched_cache_close
-
-        jail(app, gen_unique_id(), mytask.name, [4], {})
-        self.assertTrue(cache._was_closed)
-        cache.cache.close = old_cache_close
-        if old_cache_parse_backend:
-            cache.parse_backend_uri = old_cache_parse_backend
-        else:
-            del(cache.parse_backend_uri)
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py
deleted file mode 100644
index 1d93261..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-try:
-    import unittest
-    unittest.skip
-except AttributeError:
-    import unittest2 as unittest  # noqa
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py
deleted file mode 100644
index e4512f0..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-
-This module is an alias to :mod:`kombu.transport.django`
-
-"""
-from __future__ import absolute_import, unicode_literals
-
-import kombu.transport.django as transport
-
-__path__.extend(transport.__path__)
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/urls.py b/thesisenv/lib/python3.6/site-packages/djcelery/urls.py
deleted file mode 100644
index 079eb51..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/urls.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-
-URLs defined for celery.
-
-* ``/$task_id/done/``
-
-  URL to :func:`~celery.views.is_successful`.
-
-* ``/$task_id/status/``
-
-  URL to :func:`~celery.views.task_status`.
-
-"""
-from __future__ import absolute_import, unicode_literals
-
-
-from django.conf.urls import url
-
-
-from . import views
-
-task_pattern = r'(?P<task_id>[\w\d\-\.]+)'
-
-urlpatterns = [
-    url(
-        r'^%s/done/?$' % task_pattern,
-        views.is_task_successful,
-        name='celery-is_task_successful'
-    ),
-    url(
-        r'^%s/status/?$' % task_pattern,
-        views.task_status,
-        name='celery-task_status'
-    ),
-    url(
-        r'^tasks/?$',
-        views.registered_tasks,
-        name='celery-tasks'
-    ),
-]
diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/utils.py
deleted file mode 100644
index 71ec83a..0000000
--- a/thesisenv/lib/python3.6/site-packages/djcelery/utils.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -- XXX This module must not use translation as that causes
-# -- a recursive loader import!
-from __future__ import absolute_import, unicode_literals
-
-from datetime import datetime
-
-from django.conf import settings
-from django.utils import timezone
-
-# Database-related exceptions.
-from django.db import DatabaseError -try: - import MySQLdb as mysql - _my_database_errors = (mysql.DatabaseError, - mysql.InterfaceError, - mysql.OperationalError) -except ImportError: - _my_database_errors = () # noqa -try: - import psycopg2 as pg - _pg_database_errors = (pg.DatabaseError, - pg.InterfaceError, - pg.OperationalError) -except ImportError: - _pg_database_errors = () # noqa -try: - import sqlite3 - _lite_database_errors = (sqlite3.DatabaseError, - sqlite3.InterfaceError, - sqlite3.OperationalError) -except ImportError: - _lite_database_errors = () # noqa -try: - import cx_Oracle as oracle - _oracle_database_errors = (oracle.DatabaseError, - oracle.InterfaceError, - oracle.OperationalError) -except ImportError: - _oracle_database_errors = () # noqa - -DATABASE_ERRORS = ((DatabaseError, ) + - _my_database_errors + - _pg_database_errors + - _lite_database_errors + - _oracle_database_errors) - - -def make_aware(value): - if settings.USE_TZ: - # naive datetimes are assumed to be in UTC. - if timezone.is_naive(value): - value = timezone.make_aware(value, timezone.utc) - # then convert to the Django configured timezone. - default_tz = timezone.get_default_timezone() - value = timezone.localtime(value, default_tz) - return value - - -def make_naive(value): - if settings.USE_TZ: - default_tz = timezone.get_default_timezone() - value = timezone.make_naive(value, default_tz) - return value - - -def now(): - return make_aware(timezone.now()) - - -def correct_awareness(value): - if isinstance(value, datetime): - if settings.USE_TZ: - return make_aware(value) - elif timezone.is_aware(value): - default_tz = timezone.get_default_timezone() - return timezone.make_naive(value, default_tz) - return value - - -def is_database_scheduler(scheduler): - if not scheduler: - return False - from kombu.utils import symbol_by_name - from .schedulers import DatabaseScheduler - return issubclass(symbol_by_name(scheduler), DatabaseScheduler) - - -def fromtimestamp(value): - if settings.USE_TZ: - return make_aware(datetime.utcfromtimestamp(value)) - else: - return datetime.fromtimestamp(value) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/views.py b/thesisenv/lib/python3.6/site-packages/djcelery/views.py deleted file mode 100644 index 47f9654..0000000 --- a/thesisenv/lib/python3.6/site-packages/djcelery/views.py +++ /dev/null @@ -1,125 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from functools import wraps - -from django.http import HttpResponse, Http404 - -from anyjson import serialize - -from celery import states -from celery.five import keys, items -from celery.registry import tasks -from celery.result import AsyncResult -from celery.utils import get_full_cls_name -from celery.utils.encoding import safe_repr - -# Ensure built-in tasks are loaded for task_list view -import celery.task # noqa - - -def JsonResponse(response): - return HttpResponse(serialize(response), content_type='application/json') - - -def task_view(task): - """Decorator turning any task into a view that applies the task - asynchronously. Keyword arguments (via URLconf, etc.) will - supercede GET or POST parameters when there are conflicts. - - Returns a JSON dictionary containing the keys ``ok``, and - ``task_id``. 
- - """ - - def _applier(request, **options): - kwargs = request.POST if request.method == 'POST' else request.GET - # no multivalue - kwargs = {k: v for k, v in items(kwargs)} - if options: - kwargs.update(options) - result = task.apply_async(kwargs=kwargs) - return JsonResponse({'ok': 'true', 'task_id': result.task_id}) - - return _applier - - -def apply(request, task_name): - """View applying a task. - - **Note:** Please use this with caution. Preferably you shouldn't make this - publicly accessible without ensuring your code is safe! - - """ - try: - task = tasks[task_name] - except KeyError: - raise Http404('apply: no such task') - return task_view(task)(request) - - -def is_task_successful(request, task_id): - """Returns task execute status in JSON format.""" - return JsonResponse({'task': { - 'id': task_id, - 'executed': AsyncResult(task_id).successful(), - }}) - - -def task_status(request, task_id): - """Returns task status and result in JSON format.""" - result = AsyncResult(task_id) - state, retval = result.state, result.result - response_data = {'id': task_id, 'status': state, 'result': retval} - if state in states.EXCEPTION_STATES: - traceback = result.traceback - response_data.update({'result': safe_repr(retval), - 'exc': get_full_cls_name(retval.__class__), - 'traceback': traceback}) - return JsonResponse({'task': response_data}) - - -def registered_tasks(request): - """View returning all defined tasks as a JSON object.""" - return JsonResponse({'regular': list(keys(tasks)), 'periodic': ''}) - - -def task_webhook(fun): - """Decorator turning a function into a task webhook. - - If an exception is raised within the function, the decorated - function catches this and returns an error JSON response, otherwise - it returns the result as a JSON response. - - - Example: - - .. code-block:: python - - @task_webhook - def add(request): - x = int(request.GET['x']) - y = int(request.GET['y']) - return x + y - - def view(request): - response = add(request) - print(response.content) - - Gives:: - - "{'status': 'success', 'retval': 100}" - - """ - - @wraps(fun) - def _inner(*args, **kwargs): - try: - retval = fun(*args, **kwargs) - except Exception as exc: - response = {'status': 'failure', 'reason': safe_repr(exc)} - else: - response = {'status': 'success', 'retval': retval} - - return JsonResponse(response) - - return _inner
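
A quick standalone check of the task_pattern named group from the deleted djcelery/urls.py: the (?P<task_id>...) capture is what supplies the task_id keyword argument to the is_task_successful and task_status views it routes to. The UUID below is an arbitrary example value, not one taken from the project.

import re

# task_pattern as defined in the deleted djcelery/urls.py
task_pattern = r'(?P<task_id>[\w\d\-\.]+)'

# arbitrary example task id, for illustration only
match = re.match(r'^%s/status/?$' % task_pattern,
                 '2f1c2456-a49a-4cfb-9e11-89c35866e268/status/')
print(match.group('task_id'))  # 2f1c2456-a49a-4cfb-9e11-89c35866e268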
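The deleted djcelery/utils.py is mostly timezone plumbing: with USE_TZ enabled, its make_aware() treats naive datetimes as UTC and then localises them to the configured default zone. A minimal self-contained sketch of that behaviour follows; Europe/Berlin is an assumed example TIME_ZONE, not a setting taken from the project.

from datetime import datetime

import django
from django.conf import settings

if not settings.configured:
    # assumed example zone; any USE_TZ=True configuration behaves the same
    settings.configure(USE_TZ=True, TIME_ZONE='Europe/Berlin')
    django.setup()

from django.utils import timezone


def make_aware(value):
    # mirrors the deleted djcelery.utils.make_aware: naive datetimes are
    # assumed to be UTC, then converted to the default timezone
    if settings.USE_TZ:
        if timezone.is_naive(value):
            value = timezone.make_aware(value, timezone.utc)
        value = timezone.localtime(value, timezone.get_default_timezone())
    return value


print(make_aware(datetime(2018, 10, 30, 11, 23)))  # 2018-10-30 12:23:00+01:00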
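The JSON envelope pinned down by the deleted views.py and its tests is easy to see in isolation. The sketch below is an illustration rather than the removed implementation: json.dumps stands in for the anyjson.serialize import and repr() for celery's safe_repr, but the success/failure shape follows the deleted task_webhook decorator, and add_webhook mirrors the example from the deleted test_views.py.

import json
from functools import wraps

import django
from django.conf import settings

if not settings.configured:
    settings.configure()  # minimal settings so HttpResponse is usable
    django.setup()

from django.http import HttpResponse
from django.test import RequestFactory


def task_webhook(fun):
    # same success/failure envelope as the deleted djcelery.views.task_webhook
    @wraps(fun)
    def _inner(*args, **kwargs):
        try:
            retval = fun(*args, **kwargs)
        except Exception as exc:
            response = {'status': 'failure', 'reason': repr(exc)}
        else:
            response = {'status': 'success', 'retval': retval}
        return HttpResponse(json.dumps(response),
                            content_type='application/json')
    return _inner


@task_webhook
def add_webhook(request):
    # mirrors the add_webhook example from the deleted test_views.py
    return int(request.GET['x']) + int(request.GET['y'])


request = RequestFactory().get('/tasks/add', {'x': 10, 'y': 10})
print(add_webhook(request).content)  # b'{"status": "success", "retval": 20}'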