
deleted djcelery

master · Esther Kleinhenz, 6 years ago
parent commit 65d167a8b8
100 changed files with 43 additions and 23890 deletions
  1. +0 -4 application/__init__.py
  2. +1 -41 application/admin.py
  3. +0 -13 application/celeryapp.py
  4. +0 -25 application/forms.py
  5. +37 -0 application/migrations/0002_auto_20181030_1223.py
  6. +1 -37 application/models.py
  7. +0 -7 application/tasks.py
  8. +4 -0 log.txt
  9. +0 -11 mysite/settings.py
  10. +0 -155 thesisenv/lib/python3.6/site-packages/celery/__init__.py
  11. +0 -54 thesisenv/lib/python3.6/site-packages/celery/__main__.py
  12. +0 -159 thesisenv/lib/python3.6/site-packages/celery/_state.py
  13. +0 -150 thesisenv/lib/python3.6/site-packages/celery/app/__init__.py
  14. +0 -512 thesisenv/lib/python3.6/site-packages/celery/app/amqp.py
  15. +0 -58 thesisenv/lib/python3.6/site-packages/celery/app/annotations.py
  16. +0 -675 thesisenv/lib/python3.6/site-packages/celery/app/base.py
  17. +0 -379 thesisenv/lib/python3.6/site-packages/celery/app/builtins.py
  18. +0 -317 thesisenv/lib/python3.6/site-packages/celery/app/control.py
  19. +0 -274 thesisenv/lib/python3.6/site-packages/celery/app/defaults.py
  20. +0 -257 thesisenv/lib/python3.6/site-packages/celery/app/log.py
  21. +0 -71 thesisenv/lib/python3.6/site-packages/celery/app/registry.py
  22. +0 -95 thesisenv/lib/python3.6/site-packages/celery/app/routes.py
  23. +0 -948 thesisenv/lib/python3.6/site-packages/celery/app/task.py
  24. +0 -441 thesisenv/lib/python3.6/site-packages/celery/app/trace.py
  25. +0 -266 thesisenv/lib/python3.6/site-packages/celery/app/utils.py
  26. +0 -0 thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py
  27. +0 -151 thesisenv/lib/python3.6/site-packages/celery/apps/beat.py
  28. +0 -372 thesisenv/lib/python3.6/site-packages/celery/apps/worker.py
  29. +0 -68 thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py
  30. +0 -317 thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py
  31. +0 -623 thesisenv/lib/python3.6/site-packages/celery/backends/base.py
  32. +0 -161 thesisenv/lib/python3.6/site-packages/celery/backends/cache.py
  33. +0 -196 thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py
  34. +0 -116 thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py
  35. +0 -201 thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py
  36. +0 -74 thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py
  37. +0 -62 thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py
  38. +0 -264 thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py
  39. +0 -295 thesisenv/lib/python3.6/site-packages/celery/backends/redis.py
  40. +0 -67 thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py
  41. +0 -571 thesisenv/lib/python3.6/site-packages/celery/beat.py
  42. +0 -5 thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py
  43. +0 -380 thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py
  44. +0 -668 thesisenv/lib/python3.6/site-packages/celery/bin/base.py
  45. +0 -100 thesisenv/lib/python3.6/site-packages/celery/bin/beat.py
  46. +0 -850 thesisenv/lib/python3.6/site-packages/celery/bin/celery.py
  47. +0 -181 thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py
  48. +0 -139 thesisenv/lib/python3.6/site-packages/celery/bin/events.py
  49. +0 -191 thesisenv/lib/python3.6/site-packages/celery/bin/graph.py
  50. +0 -646 thesisenv/lib/python3.6/site-packages/celery/bin/multi.py
  51. +0 -270 thesisenv/lib/python3.6/site-packages/celery/bin/worker.py
  52. +0 -422 thesisenv/lib/python3.6/site-packages/celery/bootsteps.py
  53. +0 -698 thesisenv/lib/python3.6/site-packages/celery/canvas.py
  54. +0 -29 thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py
  55. +0 -1270 thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py
  56. +0 -171 thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py
  57. +0 -161 thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py
  58. +0 -136 thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py
  59. +0 -178 thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py
  60. +0 -30 thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py
  61. +0 -57 thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py
  62. +0 -0 thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py
  63. +0 -172 thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py
  64. +0 -249 thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py
  65. +0 -126 thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py
  66. +0 -365 thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py
  67. +0 -183 thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py
  68. +0 -76 thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py
  69. +0 -671 thesisenv/lib/python3.6/site-packages/celery/datastructures.py
  70. +0 -408 thesisenv/lib/python3.6/site-packages/celery/events/__init__.py
  71. +0 -544 thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py
  72. +0 -109 thesisenv/lib/python3.6/site-packages/celery/events/dumper.py
  73. +0 -114 thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py
  74. +0 -656 thesisenv/lib/python3.6/site-packages/celery/events/state.py
  75. +0 -171 thesisenv/lib/python3.6/site-packages/celery/exceptions.py
  76. +0 -392 thesisenv/lib/python3.6/site-packages/celery/five.py
  77. +0 -0 thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py
  78. +0 -266 thesisenv/lib/python3.6/site-packages/celery/fixups/django.py
  79. +0 -37 thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py
  80. +0 -17 thesisenv/lib/python3.6/site-packages/celery/loaders/app.py
  81. +0 -299 thesisenv/lib/python3.6/site-packages/celery/loaders/base.py
  82. +0 -52 thesisenv/lib/python3.6/site-packages/celery/loaders/default.py
  83. +0 -373 thesisenv/lib/python3.6/site-packages/celery/local.py
  84. +0 -813 thesisenv/lib/python3.6/site-packages/celery/platforms.py
  85. +0 -925 thesisenv/lib/python3.6/site-packages/celery/result.py
  86. +0 -593 thesisenv/lib/python3.6/site-packages/celery/schedules.py
  87. +0 -71 thesisenv/lib/python3.6/site-packages/celery/security/__init__.py
  88. +0 -93 thesisenv/lib/python3.6/site-packages/celery/security/certificate.py
  89. +0 -27 thesisenv/lib/python3.6/site-packages/celery/security/key.py
  90. +0 -110 thesisenv/lib/python3.6/site-packages/celery/security/serialization.py
  91. +0 -35 thesisenv/lib/python3.6/site-packages/celery/security/utils.py
  92. +0 -76 thesisenv/lib/python3.6/site-packages/celery/signals.py
  93. +0 -153 thesisenv/lib/python3.6/site-packages/celery/states.py
  94. +0 -59 thesisenv/lib/python3.6/site-packages/celery/task/__init__.py
  95. +0 -179 thesisenv/lib/python3.6/site-packages/celery/task/base.py
  96. +0 -220 thesisenv/lib/python3.6/site-packages/celery/task/http.py
  97. +0 -88 thesisenv/lib/python3.6/site-packages/celery/task/sets.py
  98. +0 -12 thesisenv/lib/python3.6/site-packages/celery/task/trace.py
  99. +0 -87 thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py
  100. +0 -0 thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py

+ 0  - 4  application/__init__.py

from __future__ import absolute_import, unicode_literals
# This will make sure celery is always imported when
# Django starts so that shared_task will use this app.
from .celeryapp import app as celery_app
__all__ = ['celery_app']

+ 1  - 41  application/admin.py

from django.contrib.auth.models import User


from .models import Post, CustomUser
from .models import ScheduledReport, ReportRecipient, ScheduledReportGroup
from .forms import ScheduledReportForm





class CustomUserInline(admin.StackedInline):


admin.site.register(Post)



class ReportRecipientAdmin(admin.TabularInline):
    model = ReportRecipient


class ScheduledReportAdmin(admin.ModelAdmin):
    """
    List display for Scheduled reports in Django admin
    """
    model = ScheduledReport
    list_display = ('id', 'get_recipients')
    inlines = [
        ReportRecipientAdmin
    ]
    form = ScheduledReportForm

    def get_recipients(self, model):
        recipients = model.reportrecep.all().values_list('email', flat=True)
        if not recipients:
            return 'No recipients added'
        recipient_list = ''
        for recipient in recipients:
            recipient_list = recipient_list + recipient + ', '
        return recipient_list[:-2]
    get_recipients.short_description = 'Recipients'
    get_recipients.allow_tags = True


class ScheduledReportGroupAdmin(admin.ModelAdmin):
    """
    List display for ScheduledReportGroup Admin
    """
    model = ScheduledReportGroup
    list_display = ('get_scheduled_report_name', 'get_report_name')

    def get_scheduled_report_name(self, model):
        return model.scheduled_report.subject

    def get_report_name(self, model):
        return model.report.name
    get_scheduled_report_name.short_description = "Scheduled Report Name"
    get_report_name.short_description = "Report Name"
    show_change_link = True
    get_report_name.allow_tags = True


admin.site.register(ScheduledReport, ScheduledReportAdmin)
admin.site.register(ScheduledReportGroup, ScheduledReportGroupAdmin)

+ 0  - 13  application/celeryapp.py

from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')
from django.conf import settings

app = Celery('application')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
app.config_from_object('django.conf:settings')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
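
For context, autodiscover_tasks() imports a tasks module from every app listed in INSTALLED_APPS, so any function decorated with shared_task is registered on this Celery instance at startup. A minimal sketch of such a module (the task name and body are hypothetical, shown only to illustrate the mechanism):

# hypothetical tasks.py picked up by autodiscover_tasks() above
from celery import shared_task

@shared_task
def send_report_email(recipient):
    # placeholder body; stands in for whatever work should run on a worker
    print('sending report to', recipient)

# callers enqueue it without importing the Celery app object directly:
# send_report_email.delay('user@example.com')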

+ 0  - 25  application/forms.py

from datetime import datetime
from croniter import croniter
from django.forms import ModelForm, ValidationError
from .models import ScheduledReport


class PostForm(forms.ModelForm):
    class Meta:

    class Meta:
        model = CustomUser
        fields = ['m_tags']


class ScheduledReportForm(ModelForm):
    class Meta:
        model = ScheduledReport
        fields = ['subject', 'cron_expression']
        help_texts = {'cron_expression': 'Scheduled time is considered in UTC'}

    def clean(self):
        cleaned_data = super(ScheduledReportForm, self).clean()
        cron_expression = cleaned_data.get("cron_expression")
        try:
            iter = croniter(cron_expression, datetime.now())
        except:
            raise ValidationError("Incorrect cron expression:\
                The information you must include is (in order of appearance):\
                A number (or list of numbers, or range of numbers), m, representing the minute of the hour\
                A number (or list of numbers, or range of numbers), h, representing the hour of the day\
                A number (or list of numbers, or range of numbers), dom, representing the day of the month\
                A number (or list, or range), or name (or list of names), mon, representing the month of the year\
                A number (or list, or range), or name (or list of names), dow, representing the day of the week\
                The asterisks (*) in our entry tell cron that for that unit of time, the job should be run every.\
                Eg. */5 * * * * cron for executing every 5 mins")
        return cleaned_data
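
The clean() method above only verifies that croniter can parse the expression; the same library is what the (also removed) ScheduledReport model used to compute the next run time. A small standalone sketch of both uses, assuming croniter is installed:

from datetime import datetime
from croniter import croniter

expression = '*/5 * * * *'  # every five minutes

# construction raises for a malformed expression, which is the check clean() relies on
schedule = croniter(expression, datetime.now())

# get_next() yields the upcoming fire times, as used for next_run_at in models.py
print(schedule.get_next(datetime))
print(schedule.get_next(datetime))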

+ 37  - 0  application/migrations/0002_auto_20181030_1223.py

# Generated by Django 2.1.2 on 2018-10-30 11:23

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('application', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='reportrecipient',
            name='scheduled_report',
        ),
        migrations.RemoveField(
            model_name='scheduledreportgroup',
            name='report',
        ),
        migrations.RemoveField(
            model_name='scheduledreportgroup',
            name='scheduled_report',
        ),
        migrations.DeleteModel(
            name='Report',
        ),
        migrations.DeleteModel(
            name='ReportRecipient',
        ),
        migrations.DeleteModel(
            name='ScheduledReport',
        ),
        migrations.DeleteModel(
            name='ScheduledReportGroup',
        ),
    ]

+ 1  - 37  application/models.py

        self.save()


    def __str__(self):
        return self.title

class Report(models.Model):
    report_text = models.TextField()


class ScheduledReport(models.Model):
    """
    Contains the email subject and the cron expression used to evaluate when the email has to be sent
    """
    subject = models.CharField(max_length=200)
    last_run_at = models.DateTimeField(null=True, blank=True)
    next_run_at = models.DateTimeField(null=True, blank=True)
    cron_expression = models.CharField(max_length=200)

    def save(self, *args, **kwargs):
        """
        Evaluates "next_run_at" from the cron expression, so that it is updated once the report is sent.
        """
        self.last_run_at = datetime.now()
        iter = croniter(self.cron_expression, self.last_run_at)
        self.next_run_at = iter.get_next(datetime)
        super(ScheduledReport, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.subject


class ScheduledReportGroup(models.Model):
    """
    Many-to-many mapping between the reports that will be sent out in a scheduled report
    """
    report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE)
    scheduled_report = models.ForeignKey(ScheduledReport,
                                         related_name='relatedscheduledreport', on_delete=models.CASCADE)


class ReportRecipient(models.Model):
    """
    Stores all the recipients of the given scheduled report
    """
    email = models.EmailField()
    scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE)
return self.title

+ 0  - 7  application/tasks.py

from celery.task.schedules import crontab
from celery.decorators import periodic_task
from .email_service import send_emails
# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab
@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*"))
def trigger_emails():
    send_emails()
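
celery.decorators.periodic_task and celery.task.schedules.crontab belong to the old Celery 3.x API that the removed djcelery setup depended on. A rough sketch of the same every-minute schedule expressed through a beat_schedule entry instead, assuming a Celery 4+ app object named app and the task registered as application.tasks.trigger_emails (both names are assumptions, not taken from this commit):

from celery.schedules import crontab

# hypothetical djcelery-free equivalent: declare the schedule on the app
# instead of decorating the task itself
app.conf.beat_schedule = {
    'trigger-emails-every-minute': {
        'task': 'application.tasks.trigger_emails',
        'schedule': crontab(minute='*'),  # run every minute
    },
}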

+ 4  - 0  log.txt

[24/Oct/2018 19:03:28] INFO [mysite:191] <QuerySet [<Post: Third one>]>
[24/Oct/2018 19:03:45] INFO [mysite:189] bamberg
[24/Oct/2018 19:03:45] INFO [mysite:191] <QuerySet [<Post: Third one>, <Post: here i go again>]>
[30/Oct/2018 12:25:09] INFO [mysite:56] <QuerySet [<Post: Hi there>]>
[30/Oct/2018 12:25:11] INFO [mysite:56] <QuerySet [<Post: Hi there>, <Post: Bavaria>]>
[30/Oct/2018 12:25:26] INFO [mysite:189] None
[30/Oct/2018 12:25:34] INFO [mysite:189] bayern

+ 0  - 11  mysite/settings.py

import os
import re
import socket
import djcelery


# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    'application',
    'taggit',
    'taggit_templatetags2',
    'djcelery',
    'kombu.transport.django',
]


DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}

# Celery settings
BROKER_URL = 'django://'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
djcelery.setup_loader()
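
With djcelery gone, the broker and result settings above would normally be replaced by plain Celery configuration. A minimal sketch, assuming a Redis broker and the CELERY_ settings namespace (i.e. app.config_from_object('django.conf:settings', namespace='CELERY')); the URLs are placeholders, not values taken from this project:

# hypothetical replacement settings, not part of this commit
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'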

+ 0  - 155  thesisenv/lib/python3.6/site-packages/celery/__init__.py

# -*- coding: utf-8 -*-
"""Distributed Task Queue"""
# :copyright: (c) 2015 Ask Solem and individual contributors.
# All rights # reserved.
# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
# All rights reserved.
# :license: BSD (3 Clause), see LICENSE for more details.

from __future__ import absolute_import

import os
import sys

from collections import namedtuple

version_info_t = namedtuple(
'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
)

SERIES = 'Cipater'
VERSION = version_info_t(3, 1, 26, '.post2', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://celeryproject.org'
__docformat__ = 'restructuredtext'
__all__ = [
'Celery', 'bugreport', 'shared_task', 'task',
'current_app', 'current_task', 'maybe_signature',
'chain', 'chord', 'chunks', 'group', 'signature',
'xmap', 'xstarmap', 'uuid', 'version', '__version__',
]
VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)

# -eof meta-

if os.environ.get('C_IMPDEBUG'): # pragma: no cover
from .five import builtins
real_import = builtins.__import__

def debug_import(name, locals=None, globals=None,
fromlist=None, level=-1):
glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals
importer_name = glob and glob.get('__name__') or 'unknown'
print('-- {0} imports {1}'.format(importer_name, name))
return real_import(name, locals, globals, fromlist, level)
builtins.__import__ = debug_import

# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
from celery.app import shared_task # noqa
from celery.app.base import Celery # noqa
from celery.app.utils import bugreport # noqa
from celery.app.task import Task # noqa
from celery._state import current_app, current_task # noqa
from celery.canvas import ( # noqa
chain, chord, chunks, group,
signature, maybe_signature, xmap, xstarmap, subtask,
)
from celery.utils import uuid # noqa

# Eventlet/gevent patching must happen before importing
# anything else, so these tools must be at top-level.


def _find_option_with_arg(argv, short_opts=None, long_opts=None):
"""Search argv for option specifying its short and longopt
alternatives.

Return the value of the option if found.

"""
for i, arg in enumerate(argv):
if arg.startswith('-'):
if long_opts and arg.startswith('--'):
name, _, val = arg.partition('=')
if name in long_opts:
return val
if short_opts and arg in short_opts:
return argv[i + 1]
raise KeyError('|'.join(short_opts or [] + long_opts or []))


def _patch_eventlet():
import eventlet
import eventlet.debug
eventlet.monkey_patch()
EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0))
if EVENTLET_DBLOCK:
eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK)


def _patch_gevent():
from gevent import monkey, version_info
monkey.patch_all()
if version_info[0] == 0: # pragma: no cover
# Signals aren't working in gevent versions <1.0,
# and are not monkey patched by patch_all()
from gevent import signal as _gevent_signal
_signal = __import__('signal')
_signal.signal = _gevent_signal


def maybe_patch_concurrency(argv=sys.argv,
short_opts=['-P'], long_opts=['--pool'],
patches={'eventlet': _patch_eventlet,
'gevent': _patch_gevent}):
"""With short and long opt alternatives that specify the command line
option to set the pool, this makes sure that anything that needs
to be patched is completed as early as possible.
(e.g. eventlet/gevent monkey patches)."""
try:
pool = _find_option_with_arg(argv, short_opts, long_opts)
except KeyError:
pass
else:
try:
patcher = patches[pool]
except KeyError:
pass
else:
patcher()
# set up eventlet/gevent environments ASAP.
from celery import concurrency
concurrency.get_implementation(pool)

# Lazy loading
from celery import five # noqa

old_module, new_module = five.recreate_module( # pragma: no cover
__name__,
by_module={
'celery.app': ['Celery', 'bugreport', 'shared_task'],
'celery.app.task': ['Task'],
'celery._state': ['current_app', 'current_task'],
'celery.canvas': ['chain', 'chord', 'chunks', 'group',
'signature', 'maybe_signature', 'subtask',
'xmap', 'xstarmap'],
'celery.utils': ['uuid'],
},
direct={'task': 'celery.task'},
__package__='celery', __file__=__file__,
__path__=__path__, __doc__=__doc__, __version__=__version__,
__author__=__author__, __contact__=__contact__,
__homepage__=__homepage__, __docformat__=__docformat__, five=five,
VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
version_info_t=version_info_t,
maybe_patch_concurrency=maybe_patch_concurrency,
_find_option_with_arg=_find_option_with_arg,
)

+ 0  - 54  thesisenv/lib/python3.6/site-packages/celery/__main__.py

from __future__ import absolute_import

import sys

from os.path import basename

from . import maybe_patch_concurrency

__all__ = ['main']

DEPRECATED_FMT = """
The {old!r} command is deprecated, please use {new!r} instead:

$ {new_argv}

"""


def _warn_deprecated(new):
print(DEPRECATED_FMT.format(
old=basename(sys.argv[0]), new=new,
new_argv=' '.join([new] + sys.argv[1:])),
)


def main():
if 'multi' not in sys.argv:
maybe_patch_concurrency()
from celery.bin.celery import main
main()


def _compat_worker():
maybe_patch_concurrency()
_warn_deprecated('celery worker')
from celery.bin.worker import main
main()


def _compat_multi():
_warn_deprecated('celery multi')
from celery.bin.multi import main
main()


def _compat_beat():
maybe_patch_concurrency()
_warn_deprecated('celery beat')
from celery.bin.beat import main
main()


if __name__ == '__main__': # pragma: no cover
main()

+ 0  - 159  thesisenv/lib/python3.6/site-packages/celery/_state.py

# -*- coding: utf-8 -*-
"""
celery._state
~~~~~~~~~~~~~~~

This is an internal module containing thread state
like the ``current_app``, and ``current_task``.

This module shouldn't be used directly.

"""
from __future__ import absolute_import, print_function

import os
import sys
import threading
import weakref

from celery.local import Proxy
from celery.utils.threads import LocalStack

try:
from weakref import WeakSet as AppSet
except ImportError: # XXX Py2.6

class AppSet(object): # noqa

def __init__(self):
self._refs = set()

def add(self, app):
self._refs.add(weakref.ref(app))

def __iter__(self):
dirty = []
try:
for appref in self._refs:
app = appref()
if app is None:
dirty.append(appref)
else:
yield app
finally:
while dirty:
self._refs.discard(dirty.pop())

__all__ = ['set_default_app', 'get_current_app', 'get_current_task',
'get_current_worker_task', 'current_app', 'current_task',
'connect_on_app_finalize']

#: Global default app used when no current app.
default_app = None

#: List of all app instances (weakrefs), must not be used directly.
_apps = AppSet()

#: global set of functions to call whenever a new app is finalized
#: E.g. Shared tasks, and builtin tasks are created
#: by adding callbacks here.
_on_app_finalizers = set()

_task_join_will_block = False


def connect_on_app_finalize(callback):
_on_app_finalizers.add(callback)
return callback


def _announce_app_finalized(app):
callbacks = set(_on_app_finalizers)
for callback in callbacks:
callback(app)


def _set_task_join_will_block(blocks):
global _task_join_will_block
_task_join_will_block = blocks


def task_join_will_block():
return _task_join_will_block


class _TLS(threading.local):
#: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
#: sets this, so it will always contain the last instantiated app,
#: and is the default app returned by :func:`app_or_default`.
current_app = None
_tls = _TLS()

_task_stack = LocalStack()


def set_default_app(app):
global default_app
default_app = app


def _get_current_app():
if default_app is None:
#: creates the global fallback app instance.
from celery.app import Celery
set_default_app(Celery(
'default',
loader=os.environ.get('CELERY_LOADER') or 'default',
fixups=[],
set_as_current=False, accept_magic_kwargs=True,
))
return _tls.current_app or default_app


def _set_current_app(app):
_tls.current_app = app


C_STRICT_APP = os.environ.get('C_STRICT_APP')
if os.environ.get('C_STRICT_APP'): # pragma: no cover
def get_current_app():
raise Exception('USES CURRENT APP')
import traceback
print('-- USES CURRENT_APP', file=sys.stderr) # noqa+
traceback.print_stack(file=sys.stderr)
return _get_current_app()
else:
get_current_app = _get_current_app


def get_current_task():
"""Currently executing task."""
return _task_stack.top


def get_current_worker_task():
"""Currently executing task, that was applied by the worker.

This is used to differentiate between the actual task
executed by the worker and any task that was called within
a task (using ``task.__call__`` or ``task.apply``)

"""
for task in reversed(_task_stack.stack):
if not task.request.called_directly:
return task


#: Proxy to current app.
current_app = Proxy(get_current_app)

#: Proxy to current task.
current_task = Proxy(get_current_task)


def _register_app(app):
_apps.add(app)


def _get_active_apps():
return _apps

+ 0  - 150  thesisenv/lib/python3.6/site-packages/celery/app/__init__.py

# -*- coding: utf-8 -*-
"""
celery.app
~~~~~~~~~~

Celery Application.

"""
from __future__ import absolute_import

import os

from celery.local import Proxy
from celery import _state
from celery._state import (
get_current_app as current_app,
get_current_task as current_task,
connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
)
from celery.utils import gen_task_name

from .base import Celery, AppPickler

__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
'bugreport', 'enable_trace', 'disable_trace', 'shared_task',
'set_default_app', 'current_app', 'current_task',
'push_current_task', 'pop_current_task']

#: Proxy always returning the app set as default.
default_app = Proxy(lambda: _state.default_app)

#: Function returning the app provided or the default app if none.
#:
#: The environment variable :envvar:`CELERY_TRACE_APP` is used to
#: trace app leaks. When enabled an exception is raised if there
#: is no active app.
app_or_default = None

#: The 'default' loader is the default loader used by old applications.
#: This is deprecated and should no longer be used as it's set too early
#: to be affected by --loader argument.
default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX


#: Function used to push a task to the thread local stack
#: keeping track of the currently executing task.
#: You must remember to pop the task after.
push_current_task = _task_stack.push

#: Function used to pop a task from the thread local stack
#: keeping track of the currently executing task.
pop_current_task = _task_stack.pop


def bugreport(app=None):
return (app or current_app()).bugreport()


def _app_or_default(app=None):
if app is None:
return _state.get_current_app()
return app


def _app_or_default_trace(app=None): # pragma: no cover
from traceback import print_stack
from billiard import current_process
if app is None:
if getattr(_state._tls, 'current_app', None):
print('-- RETURNING TO CURRENT APP --') # noqa+
print_stack()
return _state._tls.current_app
if current_process()._name == 'MainProcess':
raise Exception('DEFAULT APP')
print('-- RETURNING TO DEFAULT APP --') # noqa+
print_stack()
return _state.default_app
return app


def enable_trace():
global app_or_default
app_or_default = _app_or_default_trace


def disable_trace():
global app_or_default
app_or_default = _app_or_default

if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover
enable_trace()
else:
disable_trace()

App = Celery # XXX Compat


def shared_task(*args, **kwargs):
"""Create shared tasks (decorator).
Will return a proxy that always takes the task from the current apps
task registry.

This can be used by library authors to create tasks that will work
for any app environment.

Example:

>>> from celery import Celery, shared_task
>>> @shared_task
... def add(x, y):
... return x + y

>>> app1 = Celery(broker='amqp://')
>>> add.app is app1
True

>>> app2 = Celery(broker='redis://')
>>> add.app is app2

"""

def create_shared_task(**options):

def __inner(fun):
name = options.get('name')
# Set as shared task so that unfinalized apps,
# and future apps will load the task.
connect_on_app_finalize(
lambda app: app._task_from_fun(fun, **options)
)

# Force all finalized apps to take this task as well.
for app in _get_active_apps():
if app.finalized:
with app._finalize_mutex:
app._task_from_fun(fun, **options)

# Return a proxy that always gets the task from the current
# apps task registry.
def task_by_cons():
app = current_app()
return app.tasks[
name or gen_task_name(app, fun.__name__, fun.__module__)
]
return Proxy(task_by_cons)
return __inner

if len(args) == 1 and callable(args[0]):
return create_shared_task(**kwargs)(args[0])
return create_shared_task(*args, **kwargs)

+ 0  - 512  thesisenv/lib/python3.6/site-packages/celery/app/amqp.py

# -*- coding: utf-8 -*-
"""
celery.app.amqp
~~~~~~~~~~~~~~~

Sending and receiving messages using Kombu.

"""
from __future__ import absolute_import

import numbers

from datetime import timedelta
from weakref import WeakValueDictionary

from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.common import Broadcast
from kombu.pools import ProducerPool
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import safe_repr
from kombu.utils.functional import maybe_list

from celery import signals
from celery.five import items, string_t
from celery.utils.text import indent as textindent
from celery.utils.timeutils import to_utc

from . import app_or_default
from . import routes as _routes

__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer']

#: earliest date supported by time.mktime.
INT_MIN = -2147483648

#: Human readable queue declaration.
QUEUE_FORMAT = """
.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
key={0.routing_key}
"""


class Queues(dict):
"""Queue name⇒ declaration mapping.

:param queues: Initial list/tuple or dict of queues.
:keyword create_missing: By default any unknown queues will be
added automatically, but if disabled
the occurrence of unknown queues
in `wanted` will raise :exc:`KeyError`.
:keyword ha_policy: Default HA policy for queues with none set.


"""
#: If set, this is a subset of queues to consume from.
#: The rest of the queues are then used for routing only.
_consume_from = None

def __init__(self, queues=None, default_exchange=None,
create_missing=True, ha_policy=None, autoexchange=None):
dict.__init__(self)
self.aliases = WeakValueDictionary()
self.default_exchange = default_exchange
self.create_missing = create_missing
self.ha_policy = ha_policy
self.autoexchange = Exchange if autoexchange is None else autoexchange
if isinstance(queues, (tuple, list)):
queues = dict((q.name, q) for q in queues)
for name, q in items(queues or {}):
self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)

def __getitem__(self, name):
try:
return self.aliases[name]
except KeyError:
return dict.__getitem__(self, name)

def __setitem__(self, name, queue):
if self.default_exchange and (not queue.exchange or
not queue.exchange.name):
queue.exchange = self.default_exchange
dict.__setitem__(self, name, queue)
if queue.alias:
self.aliases[queue.alias] = queue

def __missing__(self, name):
if self.create_missing:
return self.add(self.new_missing(name))
raise KeyError(name)

def add(self, queue, **kwargs):
"""Add new queue.

The first argument can either be a :class:`kombu.Queue` instance,
or the name of a queue. If the former the rest of the keyword
arguments are ignored, and options are simply taken from the queue
instance.

:param queue: :class:`kombu.Queue` instance or name of the queue.
:keyword exchange: (if named) specifies exchange name.
:keyword routing_key: (if named) specifies binding key.
:keyword exchange_type: (if named) specifies type of exchange.
:keyword \*\*options: (if named) Additional declaration options.

"""
if not isinstance(queue, Queue):
return self.add_compat(queue, **kwargs)
if self.ha_policy:
if queue.queue_arguments is None:
queue.queue_arguments = {}
self._set_ha_policy(queue.queue_arguments)
self[queue.name] = queue
return queue

def add_compat(self, name, **options):
# docs used to use binding_key as routing key
options.setdefault('routing_key', options.get('binding_key'))
if options['routing_key'] is None:
options['routing_key'] = name
if self.ha_policy is not None:
self._set_ha_policy(options.setdefault('queue_arguments', {}))
q = self[name] = Queue.from_dict(name, **options)
return q

def _set_ha_policy(self, args):
policy = self.ha_policy
if isinstance(policy, (list, tuple)):
return args.update({'x-ha-policy': 'nodes',
'x-ha-policy-params': list(policy)})
args['x-ha-policy'] = policy

def format(self, indent=0, indent_first=True):
"""Format routing table into string for log dumps."""
active = self.consume_from
if not active:
return ''
info = [QUEUE_FORMAT.strip().format(q)
for _, q in sorted(items(active))]
if indent_first:
return textindent('\n'.join(info), indent)
return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)

def select_add(self, queue, **kwargs):
"""Add new task queue that will be consumed from even when
a subset has been selected using the :option:`-Q` option."""
q = self.add(queue, **kwargs)
if self._consume_from is not None:
self._consume_from[q.name] = q
return q

def select(self, include):
"""Sets :attr:`consume_from` by selecting a subset of the
currently defined queues.

:param include: Names of queues to consume from.
Can be iterable or string.
"""
if include:
self._consume_from = dict((name, self[name])
for name in maybe_list(include))
select_subset = select # XXX compat

def deselect(self, exclude):
"""Deselect queues so that they will not be consumed from.

:param exclude: Names of queues to avoid consuming from.
Can be iterable or string.

"""
if exclude:
exclude = maybe_list(exclude)
if self._consume_from is None:
# using selection
return self.select(k for k in self if k not in exclude)
# using all queues
for queue in exclude:
self._consume_from.pop(queue, None)
select_remove = deselect # XXX compat

def new_missing(self, name):
return Queue(name, self.autoexchange(name), name)

@property
def consume_from(self):
if self._consume_from is not None:
return self._consume_from
return self


class TaskProducer(Producer):
app = None
auto_declare = False
retry = False
retry_policy = None
utc = True
event_dispatcher = None
send_sent_event = False

def __init__(self, channel=None, exchange=None, *args, **kwargs):
self.retry = kwargs.pop('retry', self.retry)
self.retry_policy = kwargs.pop('retry_policy',
self.retry_policy or {})
self.send_sent_event = kwargs.pop('send_sent_event',
self.send_sent_event)
exchange = exchange or self.exchange
self.queues = self.app.amqp.queues # shortcut
self.default_queue = self.app.amqp.default_queue
self._default_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE
super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)

def publish_task(self, task_name, task_args=None, task_kwargs=None,
countdown=None, eta=None, task_id=None, group_id=None,
taskset_id=None, # compat alias to group_id
expires=None, exchange=None, exchange_type=None,
event_dispatcher=None, retry=None, retry_policy=None,
queue=None, now=None, retries=0, chord=None,
callbacks=None, errbacks=None, routing_key=None,
serializer=None, delivery_mode=None, compression=None,
reply_to=None, time_limit=None, soft_time_limit=None,
declare=None, headers=None,
send_before_publish=signals.before_task_publish.send,
before_receivers=signals.before_task_publish.receivers,
send_after_publish=signals.after_task_publish.send,
after_receivers=signals.after_task_publish.receivers,
send_task_sent=signals.task_sent.send, # XXX deprecated
sent_receivers=signals.task_sent.receivers,
**kwargs):
"""Send task message."""
retry = self.retry if retry is None else retry
headers = {} if headers is None else headers

qname = queue
if queue is None and exchange is None:
queue = self.default_queue
if queue is not None:
if isinstance(queue, string_t):
qname, queue = queue, self.queues[queue]
else:
qname = queue.name
exchange = exchange or queue.exchange.name
routing_key = routing_key or queue.routing_key
if declare is None and queue and not isinstance(queue, Broadcast):
declare = [queue]
if delivery_mode is None:
delivery_mode = self._default_mode

# merge default and custom policy
retry = self.retry if retry is None else retry
_rp = (dict(self.retry_policy, **retry_policy) if retry_policy
else self.retry_policy)
task_id = task_id or uuid()
task_args = task_args or []
task_kwargs = task_kwargs or {}
if not isinstance(task_args, (list, tuple)):
raise ValueError('task args must be a list or tuple')
if not isinstance(task_kwargs, dict):
raise ValueError('task kwargs must be a dictionary')
if countdown: # Convert countdown to ETA.
self._verify_seconds(countdown, 'countdown')
now = now or self.app.now()
eta = now + timedelta(seconds=countdown)
if self.utc:
eta = to_utc(eta).astimezone(self.app.timezone)
if isinstance(expires, numbers.Real):
self._verify_seconds(expires, 'expires')
now = now or self.app.now()
expires = now + timedelta(seconds=expires)
if self.utc:
expires = to_utc(expires).astimezone(self.app.timezone)
eta = eta and eta.isoformat()
expires = expires and expires.isoformat()

body = {
'task': task_name,
'id': task_id,
'args': task_args,
'kwargs': task_kwargs,
'retries': retries or 0,
'eta': eta,
'expires': expires,
'utc': self.utc,
'callbacks': callbacks,
'errbacks': errbacks,
'timelimit': (time_limit, soft_time_limit),
'taskset': group_id or taskset_id,
'chord': chord,
}

if before_receivers:
send_before_publish(
sender=task_name, body=body,
exchange=exchange,
routing_key=routing_key,
declare=declare,
headers=headers,
properties=kwargs,
retry_policy=retry_policy,
)

self.publish(
body,
exchange=exchange, routing_key=routing_key,
serializer=serializer or self.serializer,
compression=compression or self.compression,
headers=headers,
retry=retry, retry_policy=_rp,
reply_to=reply_to,
correlation_id=task_id,
delivery_mode=delivery_mode, declare=declare,
**kwargs
)

if after_receivers:
send_after_publish(sender=task_name, body=body,
exchange=exchange, routing_key=routing_key)

if sent_receivers: # XXX deprecated
send_task_sent(sender=task_name, task_id=task_id,
task=task_name, args=task_args,
kwargs=task_kwargs, eta=eta,
taskset=group_id or taskset_id)
if self.send_sent_event:
evd = event_dispatcher or self.event_dispatcher
exname = exchange or self.exchange
if isinstance(exname, Exchange):
exname = exname.name
evd.publish(
'task-sent',
{
'uuid': task_id,
'name': task_name,
'args': safe_repr(task_args),
'kwargs': safe_repr(task_kwargs),
'retries': retries,
'eta': eta,
'expires': expires,
'queue': qname,
'exchange': exname,
'routing_key': routing_key,
},
self, retry=retry, retry_policy=retry_policy,
)
return task_id
delay_task = publish_task # XXX Compat

def _verify_seconds(self, s, what):
if s < INT_MIN:
raise ValueError('%s is out of range: %r' % (what, s))
return s

@cached_property
def event_dispatcher(self):
# We call Dispatcher.publish with a custom producer
# so don't need the dispatcher to be "enabled".
return self.app.events.Dispatcher(enabled=False)


class TaskPublisher(TaskProducer):
"""Deprecated version of :class:`TaskProducer`."""

def __init__(self, channel=None, exchange=None, *args, **kwargs):
self.app = app_or_default(kwargs.pop('app', self.app))
self.retry = kwargs.pop('retry', self.retry)
self.retry_policy = kwargs.pop('retry_policy',
self.retry_policy or {})
exchange = exchange or self.exchange
if not isinstance(exchange, Exchange):
exchange = Exchange(exchange,
kwargs.pop('exchange_type', 'direct'))
self.queues = self.app.amqp.queues # shortcut
super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)


class TaskConsumer(Consumer):
app = None

def __init__(self, channel, queues=None, app=None, accept=None, **kw):
self.app = app or self.app
if accept is None:
accept = self.app.conf.CELERY_ACCEPT_CONTENT
super(TaskConsumer, self).__init__(
channel,
queues or list(self.app.amqp.queues.consume_from.values()),
accept=accept,
**kw
)


class AMQP(object):
Connection = Connection
Consumer = Consumer

#: compat alias to Connection
BrokerConnection = Connection

producer_cls = TaskProducer
consumer_cls = TaskConsumer
queues_cls = Queues

#: Cached and prepared routing table.
_rtable = None

#: Underlying producer pool instance automatically
#: set by the :attr:`producer_pool`.
_producer_pool = None

# Exchange class/function used when defining automatic queues.
# E.g. you can use ``autoexchange = lambda n: None`` to use the
# amqp default exchange, which is a shortcut to bypass routing
# and instead send directly to the queue named in the routing key.
autoexchange = None

def __init__(self, app):
self.app = app

def flush_routes(self):
self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)

def Queues(self, queues, create_missing=None, ha_policy=None,
autoexchange=None):
"""Create new :class:`Queues` instance, using queue defaults
from the current configuration."""
conf = self.app.conf
if create_missing is None:
create_missing = conf.CELERY_CREATE_MISSING_QUEUES
if ha_policy is None:
ha_policy = conf.CELERY_QUEUE_HA_POLICY
if not queues and conf.CELERY_DEFAULT_QUEUE:
queues = (Queue(conf.CELERY_DEFAULT_QUEUE,
exchange=self.default_exchange,
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), )
autoexchange = (self.autoexchange if autoexchange is None
else autoexchange)
return self.queues_cls(
queues, self.default_exchange, create_missing,
ha_policy, autoexchange,
)

def Router(self, queues=None, create_missing=None):
"""Return the current task router."""
return _routes.Router(self.routes, queues or self.queues,
self.app.either('CELERY_CREATE_MISSING_QUEUES',
create_missing), app=self.app)

@cached_property
def TaskConsumer(self):
"""Return consumer configured to consume from the queues
we are configured for (``app.amqp.queues.consume_from``)."""
return self.app.subclass_with_self(self.consumer_cls,
reverse='amqp.TaskConsumer')
get_task_consumer = TaskConsumer # XXX compat

@cached_property
def TaskProducer(self):
"""Return publisher used to send tasks.

You should use `app.send_task` instead.

"""
conf = self.app.conf
return self.app.subclass_with_self(
self.producer_cls,
reverse='amqp.TaskProducer',
exchange=self.default_exchange,
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
serializer=conf.CELERY_TASK_SERIALIZER,
compression=conf.CELERY_MESSAGE_COMPRESSION,
retry=conf.CELERY_TASK_PUBLISH_RETRY,
retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
utc=conf.CELERY_ENABLE_UTC,
)
TaskPublisher = TaskProducer # compat

@cached_property
def default_queue(self):
return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]

@cached_property
def queues(self):
"""Queue name⇒ declaration mapping."""
return self.Queues(self.app.conf.CELERY_QUEUES)

@queues.setter # noqa
def queues(self, queues):
return self.Queues(queues)

@property
def routes(self):
if self._rtable is None:
self.flush_routes()
return self._rtable

@cached_property
def router(self):
return self.Router()

@property
def producer_pool(self):
if self._producer_pool is None:
self._producer_pool = ProducerPool(
self.app.pool,
limit=self.app.pool.limit,
Producer=self.TaskProducer,
)
return self._producer_pool
publisher_pool = producer_pool # compat alias

@cached_property
def default_exchange(self):
return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)

+ 0  - 58  thesisenv/lib/python3.6/site-packages/celery/app/annotations.py

# -*- coding: utf-8 -*-
"""
celery.app.annotations
~~~~~~~~~~~~~~~~~~~~~~

Annotations is a nice term for monkey patching
task classes in the configuration.

This prepares and performs the annotations in the
:setting:`CELERY_ANNOTATIONS` setting.

"""
from __future__ import absolute_import

from celery.five import string_t
from celery.utils.functional import firstmethod, mlazy
from celery.utils.imports import instantiate

_first_match = firstmethod('annotate')
_first_match_any = firstmethod('annotate_any')

__all__ = ['MapAnnotation', 'prepare', 'resolve_all']


class MapAnnotation(dict):

def annotate_any(self):
try:
return dict(self['*'])
except KeyError:
pass

def annotate(self, task):
try:
return dict(self[task.name])
except KeyError:
pass


def prepare(annotations):
"""Expands the :setting:`CELERY_ANNOTATIONS` setting."""

def expand_annotation(annotation):
if isinstance(annotation, dict):
return MapAnnotation(annotation)
elif isinstance(annotation, string_t):
return mlazy(instantiate, annotation)
return annotation

if annotations is None:
return ()
elif not isinstance(annotations, (list, tuple)):
annotations = (annotations, )
return [expand_annotation(anno) for anno in annotations]


def resolve_all(anno, task):
return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)

+ 0  - 675  thesisenv/lib/python3.6/site-packages/celery/app/base.py

# -*- coding: utf-8 -*-
"""
celery.app.base
~~~~~~~~~~~~~~~

Actual App instance implementation.

"""
from __future__ import absolute_import

import os
import threading
import warnings

from collections import defaultdict, deque
from copy import deepcopy
from operator import attrgetter

from amqp import promise
from billiard.util import register_after_fork
from kombu.clocks import LamportClock
from kombu.common import oid_from
from kombu.utils import cached_property, uuid

from celery import platforms
from celery import signals
from celery._state import (
_task_stack, get_current_app, _set_current_app, set_default_app,
_register_app, get_current_worker_task, connect_on_app_finalize,
_announce_app_finalized,
)
from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
from celery.five import values
from celery.loaders import get_loader_cls
from celery.local import PromiseProxy, maybe_evaluate
from celery.utils.functional import first, maybe_list
from celery.utils.imports import instantiate, symbol_by_name
from celery.utils.objects import FallbackContext, mro_lookup

from .annotations import prepare as prepare_annotations
from .defaults import DEFAULTS, find_deprecated_settings
from .registry import TaskRegistry
from .utils import (
AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr,
)

# Load all builtin tasks
from . import builtins # noqa

__all__ = ['Celery']

_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
BUILTIN_FIXUPS = frozenset([
'celery.fixups.django:fixup',
])

ERR_ENVVAR_NOT_SET = """\
The environment variable {0!r} is not set,
and as such the configuration could not be loaded.
Please set this variable and make it point to
a configuration module."""

_after_fork_registered = False


def app_has_custom(app, attr):
return mro_lookup(app.__class__, attr, stop=(Celery, object),
monkey_patched=[__name__])


def _unpickle_appattr(reverse_name, args):
"""Given an attribute name and a list of args, gets
the attribute from the current app and calls it."""
return get_current_app()._rgetattr(reverse_name)(*args)


def _global_after_fork(obj):
# Previously every app would call:
# `register_after_fork(app, app._after_fork)`
# but this created a leak as `register_after_fork` stores concrete object
# references and once registered an object cannot be removed without
# touching and iterating over the private afterfork registry list.
#
# See Issue #1949
from celery import _state
from multiprocessing import util as mputil
for app in _state._apps:
try:
app._after_fork(obj)
except Exception as exc:
if mputil._logger:
mputil._logger.info(
'after forker raised exception: %r', exc, exc_info=1)


def _ensure_after_fork():
global _after_fork_registered
_after_fork_registered = True
register_after_fork(_global_after_fork, _global_after_fork)


class Celery(object):
#: This is deprecated, use :meth:`reduce_keys` instead
Pickler = AppPickler

SYSTEM = platforms.SYSTEM
IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS

amqp_cls = 'celery.app.amqp:AMQP'
backend_cls = None
events_cls = 'celery.events:Events'
loader_cls = 'celery.loaders.app:AppLoader'
log_cls = 'celery.app.log:Logging'
control_cls = 'celery.app.control:Control'
task_cls = 'celery.app.task:Task'
registry_cls = TaskRegistry
_fixups = None
_pool = None
builtin_fixups = BUILTIN_FIXUPS

def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
set_as_current=True, accept_magic_kwargs=False,
tasks=None, broker=None, include=None, changes=None,
config_source=None, fixups=None, task_cls=None,
autofinalize=True, **kwargs):
self.clock = LamportClock()
self.main = main
self.amqp_cls = amqp or self.amqp_cls
self.events_cls = events or self.events_cls
self.loader_cls = loader or self.loader_cls
self.log_cls = log or self.log_cls
self.control_cls = control or self.control_cls
self.task_cls = task_cls or self.task_cls
self.set_as_current = set_as_current
self.registry_cls = symbol_by_name(self.registry_cls)
self.accept_magic_kwargs = accept_magic_kwargs
self.user_options = defaultdict(set)
self.steps = defaultdict(set)
self.autofinalize = autofinalize

self.configured = False
self._config_source = config_source
self._pending_defaults = deque()

self.finalized = False
self._finalize_mutex = threading.Lock()
self._pending = deque()
self._tasks = tasks
if not isinstance(self._tasks, TaskRegistry):
self._tasks = TaskRegistry(self._tasks or {})

# If the class defines a custom __reduce_args__ we need to use
# the old way of pickling apps, which is pickling a list of
# args instead of the new way that pickles a dict of keywords.
self._using_v1_reduce = app_has_custom(self, '__reduce_args__')

# these options are moved to the config to
# simplify pickling of the app object.
self._preconf = changes or {}
if broker:
self._preconf['BROKER_URL'] = broker
if backend:
self._preconf['CELERY_RESULT_BACKEND'] = backend
if include:
self._preconf['CELERY_IMPORTS'] = include

# - Apply fixups.
self.fixups = set(self.builtin_fixups) if fixups is None else fixups
# ...store fixup instances in _fixups to keep weakrefs alive.
self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups]

if self.set_as_current:
self.set_current()

self.on_init()
_register_app(self)

def set_current(self):
_set_current_app(self)

def set_default(self):
set_default_app(self)

def __enter__(self):
return self

def __exit__(self, *exc_info):
self.close()

def close(self):
self._maybe_close_pool()

def on_init(self):
"""Optional callback called at init."""
pass

def start(self, argv=None):
return instantiate(
'celery.bin.celery:CeleryCommand',
app=self).execute_from_commandline(argv)

def worker_main(self, argv=None):
return instantiate(
'celery.bin.worker:worker',
app=self).execute_from_commandline(argv)

def task(self, *args, **opts):
"""Creates new task class from any callable."""
if _EXECV and not opts.get('_force_evaluate'):
# When using execv the task in the original module will point to a
# different app, so doing things like 'add.request' will point to
# a differnt task instance. This makes sure it will always use
# the task instance from the current app.
# Really need a better solution for this :(
from . import shared_task
return shared_task(*args, _force_evaluate=True, **opts)

def inner_create_task_cls(shared=True, filter=None, **opts):
_filt = filter # stupid 2to3

def _create_task_cls(fun):
if shared:
def cons(app):
return app._task_from_fun(fun, **opts)
cons.__name__ = fun.__name__
connect_on_app_finalize(cons)
if self.accept_magic_kwargs: # compat mode
task = self._task_from_fun(fun, **opts)
if filter:
task = filter(task)
return task

if self.finalized or opts.get('_force_evaluate'):
ret = self._task_from_fun(fun, **opts)
else:
# return a proxy object that evaluates on first use
ret = PromiseProxy(self._task_from_fun, (fun, ), opts,
__doc__=fun.__doc__)
self._pending.append(ret)
if _filt:
return _filt(ret)
return ret

return _create_task_cls

if len(args) == 1:
if callable(args[0]):
return inner_create_task_cls(**opts)(*args)
raise TypeError('argument 1 to @task() must be a callable')
if args:
raise TypeError(
'@task() takes exactly 1 argument ({0} given)'.format(
sum([len(args), len(opts)])))
return inner_create_task_cls(**opts)

def _task_from_fun(self, fun, **options):
if not self.finalized and not self.autofinalize:
raise RuntimeError('Contract breach: app not finalized')
base = options.pop('base', None) or self.Task
bind = options.pop('bind', False)

T = type(fun.__name__, (base, ), dict({
'app': self,
'accept_magic_kwargs': False,
'run': fun if bind else staticmethod(fun),
'_decorated': True,
'__doc__': fun.__doc__,
'__module__': fun.__module__,
'__wrapped__': fun}, **options))()
task = self._tasks[T.name] # return global instance.
return task

def finalize(self, auto=False):
with self._finalize_mutex:
if not self.finalized:
if auto and not self.autofinalize:
raise RuntimeError('Contract breach: app not finalized')
self.finalized = True
_announce_app_finalized(self)

pending = self._pending
while pending:
maybe_evaluate(pending.popleft())

for task in values(self._tasks):
task.bind(self)

def add_defaults(self, fun):
if not callable(fun):
d, fun = fun, lambda: d
if self.configured:
return self.conf.add_defaults(fun())
self._pending_defaults.append(fun)

def config_from_object(self, obj, silent=False, force=False):
self._config_source = obj
if force or self.configured:
del(self.conf)
return self.loader.config_from_object(obj, silent=silent)

def config_from_envvar(self, variable_name, silent=False, force=False):
module_name = os.environ.get(variable_name)
if not module_name:
if silent:
return False
raise ImproperlyConfigured(
ERR_ENVVAR_NOT_SET.format(variable_name))
return self.config_from_object(module_name, silent=silent, force=force)

def config_from_cmdline(self, argv, namespace='celery'):
self.conf.update(self.loader.cmdline_config_parser(argv, namespace))

def setup_security(self, allowed_serializers=None, key=None, cert=None,
store=None, digest='sha1', serializer='json'):
from celery.security import setup_security
return setup_security(allowed_serializers, key, cert,
store, digest, serializer, app=self)

def autodiscover_tasks(self, packages, related_name='tasks', force=False):
if force:
return self._autodiscover_tasks(packages, related_name)
signals.import_modules.connect(promise(
self._autodiscover_tasks, (packages, related_name),
), weak=False, sender=self)

def _autodiscover_tasks(self, packages, related_name='tasks', **kwargs):
# argument may be lazy
packages = packages() if callable(packages) else packages
self.loader.autodiscover_tasks(packages, related_name)

def send_task(self, name, args=None, kwargs=None, countdown=None,
eta=None, task_id=None, producer=None, connection=None,
router=None, result_cls=None, expires=None,
publisher=None, link=None, link_error=None,
add_to_parent=True, reply_to=None, **options):
task_id = task_id or uuid()
producer = producer or publisher # XXX compat
router = router or self.amqp.router
conf = self.conf
if conf.CELERY_ALWAYS_EAGER: # pragma: no cover
warnings.warn(AlwaysEagerIgnored(
'CELERY_ALWAYS_EAGER has no effect on send_task',
), stacklevel=2)
options = router.route(options, name, args, kwargs)
if connection:
producer = self.amqp.TaskProducer(connection)
with self.producer_or_acquire(producer) as P:
self.backend.on_task_call(P, task_id)
task_id = P.publish_task(
name, args, kwargs, countdown=countdown, eta=eta,
task_id=task_id, expires=expires,
callbacks=maybe_list(link), errbacks=maybe_list(link_error),
reply_to=reply_to or self.oid, **options
)
result = (result_cls or self.AsyncResult)(task_id)
if add_to_parent:
parent = get_current_worker_task()
if parent:
parent.add_trail(result)
return result

def connection(self, hostname=None, userid=None, password=None,
virtual_host=None, port=None, ssl=None,
connect_timeout=None, transport=None,
transport_options=None, heartbeat=None,
login_method=None, failover_strategy=None, **kwargs):
conf = self.conf
return self.amqp.Connection(
hostname or conf.BROKER_URL,
userid or conf.BROKER_USER,
password or conf.BROKER_PASSWORD,
virtual_host or conf.BROKER_VHOST,
port or conf.BROKER_PORT,
transport=transport or conf.BROKER_TRANSPORT,
ssl=self.either('BROKER_USE_SSL', ssl),
heartbeat=heartbeat,
login_method=login_method or conf.BROKER_LOGIN_METHOD,
failover_strategy=(
failover_strategy or conf.BROKER_FAILOVER_STRATEGY
),
transport_options=dict(
conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {}
),
connect_timeout=self.either(
'BROKER_CONNECTION_TIMEOUT', connect_timeout
),
)
broker_connection = connection

def _acquire_connection(self, pool=True):
"""Helper for :meth:`connection_or_acquire`."""
if pool:
return self.pool.acquire(block=True)
return self.connection()

def connection_or_acquire(self, connection=None, pool=True, *_, **__):
return FallbackContext(connection, self._acquire_connection, pool=pool)
default_connection = connection_or_acquire # XXX compat

def producer_or_acquire(self, producer=None):
return FallbackContext(
producer, self.amqp.producer_pool.acquire, block=True,
)
default_producer = producer_or_acquire # XXX compat

def prepare_config(self, c):
"""Prepare configuration before it is merged with the defaults."""
return find_deprecated_settings(c)

def now(self):
return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC)

def mail_admins(self, subject, body, fail_silently=False):
if self.conf.ADMINS:
to = [admin_email for _, admin_email in self.conf.ADMINS]
return self.loader.mail_admins(
subject, body, fail_silently, to=to,
sender=self.conf.SERVER_EMAIL,
host=self.conf.EMAIL_HOST,
port=self.conf.EMAIL_PORT,
user=self.conf.EMAIL_HOST_USER,
password=self.conf.EMAIL_HOST_PASSWORD,
timeout=self.conf.EMAIL_TIMEOUT,
use_ssl=self.conf.EMAIL_USE_SSL,
use_tls=self.conf.EMAIL_USE_TLS,
)

def select_queues(self, queues=None):
return self.amqp.queues.select(queues)

def either(self, default_key, *values):
"""Fallback to the value of a configuration key if none of the
`*values` are true."""
return first(None, values) or self.conf.get(default_key)

def bugreport(self):
return bugreport(self)

def _get_backend(self):
from celery.backends import get_backend_by_url
backend, url = get_backend_by_url(
self.backend_cls or self.conf.CELERY_RESULT_BACKEND,
self.loader)
return backend(app=self, url=url)

def on_configure(self):
"""Callback calld when the app loads configuration"""
pass

def _get_config(self):
self.on_configure()
if self._config_source:
self.loader.config_from_object(self._config_source)
self.configured = True
s = Settings({}, [self.prepare_config(self.loader.conf),
deepcopy(DEFAULTS)])
# load lazy config dict initializers.
pending = self._pending_defaults
while pending:
s.add_defaults(maybe_evaluate(pending.popleft()()))

# preconf options must be explicitly set in the conf, and not
# as defaults or they will not be pickled with the app instance.
# This will cause errors when `CELERYD_FORCE_EXECV=True` as
# the workers will not have a BROKER_URL, CELERY_RESULT_BACKEND,
# or CELERY_IMPORTS set in the config.
if self._preconf:
s.update(self._preconf)
return s

def _after_fork(self, obj_):
self._maybe_close_pool()

def _maybe_close_pool(self):
pool, self._pool = self._pool, None
if pool is not None:
pool.force_close_all()
amqp = self.__dict__.get('amqp')
if amqp is not None:
producer_pool, amqp._producer_pool = amqp._producer_pool, None
if producer_pool is not None:
producer_pool.force_close_all()

def signature(self, *args, **kwargs):
kwargs['app'] = self
return self.canvas.signature(*args, **kwargs)

def create_task_cls(self):
"""Creates a base task class using default configuration
taken from this app."""
return self.subclass_with_self(
self.task_cls, name='Task', attribute='_app',
keep_reduce=True, abstract=True,
)

def subclass_with_self(self, Class, name=None, attribute='app',
reverse=None, keep_reduce=False, **kw):
"""Subclass an app-compatible class by setting its app attribute
to be this app instance.

App-compatible means that the class has a class attribute that
provides the default app it should use, e.g.
``class Foo: app = None``.

:param Class: The app-compatible class to subclass.
:keyword name: Custom name for the target class.
:keyword attribute: Name of the attribute holding the app,
default is 'app'.

"""
Class = symbol_by_name(Class)
reverse = reverse if reverse else Class.__name__

def __reduce__(self):
return _unpickle_appattr, (reverse, self.__reduce_args__())

attrs = dict({attribute: self}, __module__=Class.__module__,
__doc__=Class.__doc__, **kw)
if not keep_reduce:
attrs['__reduce__'] = __reduce__

return type(name or Class.__name__, (Class, ), attrs)

def _rgetattr(self, path):
return attrgetter(path)(self)

def __repr__(self):
return '<{0} {1}>'.format(type(self).__name__, appstr(self))

def __reduce__(self):
if self._using_v1_reduce:
return self.__reduce_v1__()
return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__()))

def __reduce_v1__(self):
# Reduce only pickles the configuration changes,
# so the default configuration doesn't have to be passed
# between processes.
return (
_unpickle_app,
(self.__class__, self.Pickler) + self.__reduce_args__(),
)

def __reduce_keys__(self):
"""Return keyword arguments used to reconstruct the object
when unpickling."""
return {
'main': self.main,
'changes': self.conf.changes if self.configured else self._preconf,
'loader': self.loader_cls,
'backend': self.backend_cls,
'amqp': self.amqp_cls,
'events': self.events_cls,
'log': self.log_cls,
'control': self.control_cls,
'accept_magic_kwargs': self.accept_magic_kwargs,
'fixups': self.fixups,
'config_source': self._config_source,
'task_cls': self.task_cls,
}

def __reduce_args__(self):
"""Deprecated method, please use :meth:`__reduce_keys__` instead."""
return (self.main, self.conf.changes,
self.loader_cls, self.backend_cls, self.amqp_cls,
self.events_cls, self.log_cls, self.control_cls,
self.accept_magic_kwargs, self._config_source)

@cached_property
def Worker(self):
return self.subclass_with_self('celery.apps.worker:Worker')

@cached_property
def WorkController(self, **kwargs):
return self.subclass_with_self('celery.worker:WorkController')

@cached_property
def Beat(self, **kwargs):
return self.subclass_with_self('celery.apps.beat:Beat')

@cached_property
def Task(self):
return self.create_task_cls()

@cached_property
def annotations(self):
return prepare_annotations(self.conf.CELERY_ANNOTATIONS)

@cached_property
def AsyncResult(self):
return self.subclass_with_self('celery.result:AsyncResult')

@cached_property
def ResultSet(self):
return self.subclass_with_self('celery.result:ResultSet')

@cached_property
def GroupResult(self):
return self.subclass_with_self('celery.result:GroupResult')

@cached_property
def TaskSet(self): # XXX compat
"""Deprecated! Please use :class:`celery.group` instead."""
return self.subclass_with_self('celery.task.sets:TaskSet')

@cached_property
def TaskSetResult(self): # XXX compat
"""Deprecated! Please use :attr:`GroupResult` instead."""
return self.subclass_with_self('celery.result:TaskSetResult')

@property
def pool(self):
if self._pool is None:
_ensure_after_fork()
limit = self.conf.BROKER_POOL_LIMIT
self._pool = self.connection().Pool(limit=limit)
return self._pool

@property
def current_task(self):
return _task_stack.top

@cached_property
def oid(self):
return oid_from(self)

@cached_property
def amqp(self):
return instantiate(self.amqp_cls, app=self)

@cached_property
def backend(self):
return self._get_backend()

@cached_property
def conf(self):
return self._get_config()

@cached_property
def control(self):
return instantiate(self.control_cls, app=self)

@cached_property
def events(self):
return instantiate(self.events_cls, app=self)

@cached_property
def loader(self):
return get_loader_cls(self.loader_cls)(app=self)

@cached_property
def log(self):
return instantiate(self.log_cls, app=self)

@cached_property
def canvas(self):
from celery import canvas
return canvas

@cached_property
def tasks(self):
self.finalize(auto=True)
return self._tasks

@cached_property
def timezone(self):
from celery.utils.timeutils import timezone
conf = self.conf
tz = conf.CELERY_TIMEZONE
if not tz:
return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC
else timezone.local)
return timezone.get_timezone(self.conf.CELERY_TIMEZONE)
App = Celery # compat
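For orientation, here is a minimal sketch of driving the app API shown above (``config_from_envvar`` and ``send_task``). It assumes Celery 3.1.x is installed and a broker is reachable; the app name, environment variable value, and task name are purely illustrative.

from celery import Celery

app = Celery('demo')   # illustrative app name

# config_from_envvar() (defined above) imports the module named by the
# environment variable; silent=True returns False instead of raising
# ImproperlyConfigured when the variable is unset.
app.config_from_envvar('CELERY_CONFIG_MODULE', silent=True)

# send_task() publishes a task by name without importing its code: it routes
# the message, publishes it via a producer, and returns an AsyncResult.
result = app.send_task('demo.tasks.send_mail',      # hypothetical task name
                       args=('user@example.com',),
                       countdown=10)
print(result.id)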

+ 0
- 379
thesisenv/lib/python3.6/site-packages/celery/app/builtins.py

# -*- coding: utf-8 -*-
"""
celery.app.builtins
~~~~~~~~~~~~~~~~~~~

Built-in tasks that are always available in all
app instances. E.g. chord, group and xmap.

"""
from __future__ import absolute_import

from collections import deque

from celery._state import get_current_worker_task, connect_on_app_finalize
from celery.utils import uuid
from celery.utils.log import get_logger

__all__ = []

logger = get_logger(__name__)


@connect_on_app_finalize
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.

If the configured backend requires periodic cleanup this task is also
automatically configured to run every day at 4am (requires
:program:`celery beat` to be running).

"""
@app.task(name='celery.backend_cleanup',
shared=False, _force_evaluate=True)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup


@connect_on_app_finalize
def add_unlock_chord_task(app):
"""This task is used by result backends without native chord support.

It joins chords by creating a task chain polling the header for completion.

"""
from celery.canvas import signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple

default_propagate = app.conf.CELERY_CHORD_PROPAGATES

@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
default_retry_delay=1, ignore_result=True, _force_evaluate=True,
bind=True)
def unlock_chord(self, group_id, callback, interval=None, propagate=None,
max_retries=None, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple):
# if propagate is disabled exceptions raised by chord tasks
# will be sent as part of the result list to the chord callback.
# Since 3.1 propagate will be enabled by default, and instead
# the chord callback changes state to FAILURE with the
# exception set to ChordError.
propagate = default_propagate if propagate is None else propagate
if interval is None:
interval = self.default_retry_delay

# check if the task group is ready, and if so apply the callback.
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join

try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries,
)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)

callback = signature(callback, app=app)
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=propagate)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(callback,
ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc:
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(
callback,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
return unlock_chord


@connect_on_app_finalize
def add_map_task(app):
from celery.canvas import signature

@app.task(name='celery.map', shared=False, _force_evaluate=True)
def xmap(task, it):
task = signature(task, app=app).type
return [task(item) for item in it]
return xmap


@connect_on_app_finalize
def add_starmap_task(app):
from celery.canvas import signature

@app.task(name='celery.starmap', shared=False, _force_evaluate=True)
def xstarmap(task, it):
task = signature(task, app=app).type
return [task(*item) for item in it]
return xstarmap


@connect_on_app_finalize
def add_chunk_task(app):
from celery.canvas import chunks as _chunks

@app.task(name='celery.chunks', shared=False, _force_evaluate=True)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks


@connect_on_app_finalize
def add_group_task(app):
_app = app
from celery.canvas import maybe_signature, signature
from celery.result import result_from_tuple

class Group(app.Task):
app = _app
name = 'celery.group'
accept_magic_kwargs = False
_decorated = True

def run(self, tasks, result, group_id, partial_args,
add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
taskit = (signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
return app.GroupResult(
result.id,
[stask.apply(group_id=group_id) for stask in taskit],
)
with app.producer_or_acquire() as pub:
[stask.apply_async(group_id=group_id, producer=pub,
add_to_parent=False) for stask in taskit]
parent = get_current_worker_task()
if add_to_parent and parent:
parent.add_trail(result)
return result

def prepare(self, options, tasks, args, **kwargs):
options['group_id'] = group_id = (
options.setdefault('task_id', uuid()))

def prepare_member(task):
task = maybe_signature(task, app=self.app)
task.options['group_id'] = group_id
return task, task.freeze()

try:
tasks, res = list(zip(
*[prepare_member(task) for task in tasks]
))
except ValueError: # tasks empty
tasks, res = [], []
return (tasks, self.app.GroupResult(group_id, res), group_id, args)

def apply_async(self, partial_args=(), kwargs={}, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(partial_args, kwargs, **options)
tasks, result, gid, args = self.prepare(
options, args=partial_args, **kwargs
)
super(Group, self).apply_async((
list(tasks), result.as_tuple(), gid, args), **options
)
return result

def apply(self, args=(), kwargs={}, **options):
return super(Group, self).apply(
self.prepare(options, args=args, **kwargs),
**options).get()
return Group


@connect_on_app_finalize
def add_chain_task(app):
from celery.canvas import (
Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
)

_app = app

class Chain(app.Task):
app = _app
name = 'celery.chain'
accept_magic_kwargs = False
_decorated = True

def prepare_steps(self, args, tasks):
app = self.app
steps = deque(tasks)
next_step = prev_task = prev_res = None
tasks, results = [], []
i = 0
while steps:
# First task gets partial args from the chain.
task = maybe_signature(steps.popleft(), app=app)
task = task.clone() if i else task.clone(args)
res = task.freeze()
i += 1

if isinstance(task, group):
task = maybe_unroll_group(task)
if isinstance(task, chain):
# splice the chain
steps.extendleft(reversed(task.tasks))
continue

elif isinstance(task, group) and steps and \
not isinstance(steps[0], group):
# automatically upgrade group(..) | s to chord(group, s)
try:
next_step = steps.popleft()
# for chords we freeze by pretending it's a normal
# task instead of a group.
res = Signature.freeze(next_step)
task = chord(task, body=next_step, task_id=res.task_id)
except IndexError:
pass # no callback, so keep as group
if prev_task:
# link previous task to this task.
prev_task.link(task)
# set the results parent attribute.
if not res.parent:
res.parent = prev_res

if not isinstance(prev_task, chord):
results.append(res)
tasks.append(task)
prev_task, prev_res = task, res

return tasks, results

def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
task_id=None, link=None, link_error=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
options.pop('publisher', None)
tasks, results = self.prepare_steps(args, kwargs['tasks'])
result = results[-1]
if group_id:
tasks[-1].set(group_id=group_id)
if chord:
tasks[-1].set(chord=chord)
if task_id:
tasks[-1].set(task_id=task_id)
result = tasks[-1].type.AsyncResult(task_id)
# make sure we can do a link() and link_error() on a chain object.
if link:
tasks[-1].set(link=link)
# and if any task in the chain fails, call the errbacks
if link_error:
for task in tasks:
task.set(link_error=link_error)
tasks[0].apply_async(**options)
return result

def apply(self, args=(), kwargs={}, signature=maybe_signature,
**options):
app = self.app
last, fargs = None, args # fargs passed to first task only
for task in kwargs['tasks']:
res = signature(task, app=app).clone(fargs).apply(
last and (last.get(), ),
)
res.parent, last, fargs = last, res, None
return last
return Chain


@connect_on_app_finalize
def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a signature, and this generates the task
responsible for that."""
from celery import group
from celery.canvas import maybe_signature
_app = app
default_propagate = app.conf.CELERY_CHORD_PROPAGATES

class Chord(app.Task):
app = _app
name = 'celery.chord'
accept_magic_kwargs = False
ignore_result = False
_decorated = True

def run(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, propagate=None,
eager=False, **kwargs):
app = self.app
propagate = default_propagate if propagate is None else propagate
group_id = uuid()

# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
maybe_signature(s, app=app).clone() for s in tasks
], app=self.app)
# - eager applies the group inline
if eager:
return header.apply(args=partial_args, task_id=group_id)

body['chord_size'] = len(header.tasks)
results = header.freeze(group_id=group_id, chord=body).results

return self.backend.apply_chord(
header, partial_args, group_id,
body, interval=interval, countdown=countdown,
max_retries=max_retries, propagate=propagate, result=results,
)

def apply_async(self, args=(), kwargs={}, task_id=None,
group_id=None, chord=None, **options):
app = self.app
if app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
header = kwargs.pop('header')
body = kwargs.pop('body')
header, body = (maybe_signature(header, app=app),
maybe_signature(body, app=app))
# forward certain options to body
if chord is not None:
body.options['chord'] = chord
if group_id is not None:
body.options['group_id'] = group_id
[body.link(s) for s in options.pop('link', [])]
[body.link_error(s) for s in options.pop('link_error', [])]
body_result = body.freeze(task_id)
parent = super(Chord, self).apply_async((header, body, args),
kwargs, **options)
body_result.parent = parent
return body_result

def apply(self, args=(), kwargs={}, propagate=True, **options):
body = kwargs['body']
res = super(Chord, self).apply(args, dict(kwargs, eager=True),
**options)
return maybe_signature(body, app=self.app).apply(
args=(res.get(propagate=propagate).get(), ))
return Chord
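The factories above register the ``celery.group``, ``celery.chord``, ``celery.chord_unlock`` and map/starmap/chunks tasks when an app is finalized. A hedged sketch of the canvas primitives they back, assuming Celery 3.1.x with a running worker; the broker/backend URLs and task bodies are illustrative only (backends without native chord support fall back to the ``celery.chord_unlock`` polling task shown above):

from celery import Celery, group, chord

app = Celery('demo', broker='amqp://', backend='rpc://')  # illustrative URLs

@app.task
def add(x, y):
    return x + y

@app.task
def tsum(numbers):
    return sum(numbers)

# group() fans signatures out in parallel; the GroupResult it returns is
# what the celery.group task above manages internally.
res = group(add.s(i, i) for i in range(5)).apply_async()
print(res.get())                                   # -> [0, 2, 4, 6, 8]

# chord() runs a callback once every header task has finished.
print(chord(add.s(i, i) for i in range(5))(tsum.s()).get())   # -> 20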

+ 0
- 317
thesisenv/lib/python3.6/site-packages/celery/app/control.py

# -*- coding: utf-8 -*-
"""
celery.app.control
~~~~~~~~~~~~~~~~~~~

Client for worker remote control commands.
Server implementation is in :mod:`celery.worker.control`.

"""
from __future__ import absolute_import

import warnings

from kombu.pidbox import Mailbox
from kombu.utils import cached_property

from celery.exceptions import DuplicateNodenameWarning
from celery.utils.text import pluralize

__all__ = ['Inspect', 'Control', 'flatten_reply']

W_DUPNODE = """\
Received multiple replies from node {0}: {1}.
Please make sure you give each node a unique nodename using the `-n` option.\
"""


def flatten_reply(reply):
nodes, dupes = {}, set()
for item in reply:
[dupes.add(name) for name in item if name in nodes]
nodes.update(item)
if dupes:
warnings.warn(DuplicateNodenameWarning(
W_DUPNODE.format(
pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
),
))
return nodes


class Inspect(object):
app = None

def __init__(self, destination=None, timeout=1, callback=None,
connection=None, app=None, limit=None):
self.app = app or self.app
self.destination = destination
self.timeout = timeout
self.callback = callback
self.connection = connection
self.limit = limit

def _prepare(self, reply):
if not reply:
return
by_node = flatten_reply(reply)
if self.destination and \
not isinstance(self.destination, (list, tuple)):
return by_node.get(self.destination)
return by_node

def _request(self, command, **kwargs):
return self._prepare(self.app.control.broadcast(
command,
arguments=kwargs,
destination=self.destination,
callback=self.callback,
connection=self.connection,
limit=self.limit,
timeout=self.timeout, reply=True,
))

def report(self):
return self._request('report')

def clock(self):
return self._request('clock')

def active(self, safe=False):
return self._request('dump_active', safe=safe)

def scheduled(self, safe=False):
return self._request('dump_schedule', safe=safe)

def reserved(self, safe=False):
return self._request('dump_reserved', safe=safe)

def stats(self):
return self._request('stats')

def revoked(self):
return self._request('dump_revoked')

def registered(self, *taskinfoitems):
return self._request('dump_tasks', taskinfoitems=taskinfoitems)
registered_tasks = registered

def ping(self):
return self._request('ping')

def active_queues(self):
return self._request('active_queues')

def query_task(self, ids):
return self._request('query_task', ids=ids)

def conf(self, with_defaults=False):
return self._request('dump_conf', with_defaults=with_defaults)

def hello(self, from_node, revoked=None):
return self._request('hello', from_node=from_node, revoked=revoked)

def memsample(self):
return self._request('memsample')

def memdump(self, samples=10):
return self._request('memdump', samples=samples)

def objgraph(self, type='Request', n=200, max_depth=10):
return self._request('objgraph', num=n, max_depth=max_depth, type=type)


class Control(object):
Mailbox = Mailbox

def __init__(self, app=None):
self.app = app
self.mailbox = self.Mailbox('celery', type='fanout', accept=['json'])

@cached_property
def inspect(self):
return self.app.subclass_with_self(Inspect, reverse='control.inspect')

def purge(self, connection=None):
"""Discard all waiting tasks.

This will ignore all tasks waiting for execution, and they will
be deleted from the messaging server.

:returns: the number of tasks discarded.

"""
with self.app.connection_or_acquire(connection) as conn:
return self.app.amqp.TaskConsumer(conn).purge()
discard_all = purge

def election(self, id, topic, action=None, connection=None):
self.broadcast('election', connection=connection, arguments={
'id': id, 'topic': topic, 'action': action,
})

def revoke(self, task_id, destination=None, terminate=False,
signal='SIGTERM', **kwargs):
"""Tell all (or specific) workers to revoke a task by id.

If a task is revoked, the workers will ignore the task and
not execute it after all.

:param task_id: Id of the task to revoke.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('revoke', destination=destination,
arguments={'task_id': task_id,
'terminate': terminate,
'signal': signal}, **kwargs)

def ping(self, destination=None, timeout=1, **kwargs):
"""Ping all (or specific) workers.

Will return the list of answers.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('ping', reply=True, destination=destination,
timeout=timeout, **kwargs)

def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
"""Tell all (or specific) workers to set a new rate limit
for a task by type.

:param task_name: Name of task to change rate limit for.
:param rate_limit: The rate limit as tasks per second, or a rate limit
string (`'100/m'`, etc.
see :attr:`celery.task.base.Task.rate_limit` for
more information).

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('rate_limit', destination=destination,
arguments={'task_name': task_name,
'rate_limit': rate_limit},
**kwargs)

def add_consumer(self, queue, exchange=None, exchange_type='direct',
routing_key=None, options=None, **kwargs):
"""Tell all (or specific) workers to start consuming from a new queue.

Only the queue name is required: if only the queue is specified,
the exchange and routing key will be set to the same name
(like automatic queues do).

.. note::

This command does not respect the default queue/exchange
options in the configuration.

:param queue: Name of queue to start consuming from.
:keyword exchange: Optional name of exchange.
:keyword exchange_type: Type of exchange (defaults to 'direct').
:keyword routing_key: Optional routing key.
:keyword options: Additional options as supported
by :meth:`kombu.entitiy.Queue.from_dict`.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast(
'add_consumer',
arguments=dict({'queue': queue, 'exchange': exchange,
'exchange_type': exchange_type,
'routing_key': routing_key}, **options or {}),
**kwargs
)

def cancel_consumer(self, queue, **kwargs):
"""Tell all (or specific) workers to stop consuming from ``queue``.

Supports the same keyword arguments as :meth:`broadcast`.

"""
return self.broadcast(
'cancel_consumer', arguments={'queue': queue}, **kwargs
)

def time_limit(self, task_name, soft=None, hard=None, **kwargs):
"""Tell all (or specific) workers to set time limits for
a task by type.

:param task_name: Name of task to change time limits for.
:keyword soft: New soft time limit (in seconds).
:keyword hard: New hard time limit (in seconds).

Any additional keyword arguments are passed on to :meth:`broadcast`.

"""
return self.broadcast(
'time_limit',
arguments={'task_name': task_name,
'hard': hard, 'soft': soft}, **kwargs)

def enable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to enable events."""
return self.broadcast('enable_events', {}, destination, **kwargs)

def disable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to disable events."""
return self.broadcast('disable_events', {}, destination, **kwargs)

def pool_grow(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to grow the pool by ``n``.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast('pool_grow', {'n': n}, destination, **kwargs)

def pool_shrink(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to shrink the pool by ``n``.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs)

def autoscale(self, max, min, destination=None, **kwargs):
"""Change worker(s) autoscale setting.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast(
'autoscale', {'max': max, 'min': min}, destination, **kwargs)

def broadcast(self, command, arguments=None, destination=None,
connection=None, reply=False, timeout=1, limit=None,
callback=None, channel=None, **extra_kwargs):
"""Broadcast a control command to the celery workers.

:param command: Name of command to send.
:param arguments: Keyword arguments for the command.
:keyword destination: If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
:keyword connection: Custom broker connection to use, if not set,
a connection will be established automatically.
:keyword reply: Wait for and return the reply.
:keyword timeout: Timeout in seconds to wait for the reply.
:keyword limit: Limit number of replies.
:keyword callback: Callback called immediately for each reply
received.

"""
with self.app.connection_or_acquire(connection) as conn:
arguments = dict(arguments or {}, **extra_kwargs)
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
)
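A short, hedged sketch of using the remote-control API defined above (``Control`` broadcasts plus the ``Inspect`` wrapper), assuming Celery 3.1.x, a reachable broker and at least one running worker; the broker URL and task name are illustrative:

from celery import Celery

app = Celery('demo', broker='amqp://')   # illustrative broker URL

# Broadcast commands from the Control class above:
print(app.control.ping(timeout=0.5))     # e.g. [{'worker1@host': {'ok': 'pong'}}]
app.control.rate_limit('demo.tasks.send_mail', '10/m')   # hypothetical task name
app.control.pool_grow(2)

# Inspect wraps the "dump_*" replies, keyed per node:
insp = app.control.inspect()
print(insp.active())       # tasks currently being executed
print(insp.registered())   # task names each worker knows about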

+ 0
- 274
thesisenv/lib/python3.6/site-packages/celery/app/defaults.py

# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~

Configuration introspection and defaults.

"""
from __future__ import absolute_import

import sys

from collections import deque, namedtuple
from datetime import timedelta

from celery.five import items
from celery.utils import strtobool
from celery.utils.functional import memoize

__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']

is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')

DEFAULT_POOL = 'prefork'
if is_jython:
DEFAULT_POOL = 'threads'
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = 'solo'
else:
DEFAULT_POOL = 'prefork'

DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml']
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""

_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'BROKER_URL setting'}
_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'URL form of CELERY_RESULT_BACKEND'}

searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))


# logging: processName first introduced in Py 2.6.2 (Issue #1644).
if sys.version_info < (2, 6, 2):
DEFAULT_PROCESS_LOG_FMT = DEFAULT_LOG_FMT


class Option(object):
alt = None
deprecate_by = None
remove_by = None
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
bool=strtobool, dict=dict, tuple=tuple)

def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get('type') or 'string'
for attr, value in items(kwargs):
setattr(self, attr, value)

def to_python(self, value):
return self.typemap[self.type](value)

def __repr__(self):
return '<Option: type->{0} default->{1!r}>'.format(self.type,
self.default)

NAMESPACES = {
'BROKER': {
'URL': Option(None, type='string'),
'CONNECTION_TIMEOUT': Option(4, type='float'),
'CONNECTION_RETRY': Option(True, type='bool'),
'CONNECTION_MAX_RETRIES': Option(100, type='int'),
'FAILOVER_STRATEGY': Option(None, type='string'),
'HEARTBEAT': Option(None, type='int'),
'HEARTBEAT_CHECKRATE': Option(3.0, type='int'),
'LOGIN_METHOD': Option(None, type='string'),
'POOL_LIMIT': Option(10, type='int'),
'USE_SSL': Option(False, type='bool'),
'TRANSPORT': Option(type='string'),
'TRANSPORT_OPTIONS': Option({}, type='dict'),
'HOST': Option(type='string', **_BROKER_OLD),
'PORT': Option(type='int', **_BROKER_OLD),
'USER': Option(type='string', **_BROKER_OLD),
'PASSWORD': Option(type='string', **_BROKER_OLD),
'VHOST': Option(type='string', **_BROKER_OLD),
},
'CASSANDRA': {
'COLUMN_FAMILY': Option(type='string'),
'DETAILED_MODE': Option(False, type='bool'),
'KEYSPACE': Option(type='string'),
'READ_CONSISTENCY': Option(type='string'),
'SERVERS': Option(type='list'),
'WRITE_CONSISTENCY': Option(type='string'),
},
'CELERY': {
'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'),
'ACKS_LATE': Option(False, type='bool'),
'ALWAYS_EAGER': Option(False, type='bool'),
'ANNOTATIONS': Option(type='any'),
'BROADCAST_QUEUE': Option('celeryctl'),
'BROADCAST_EXCHANGE': Option('celeryctl'),
'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
'CACHE_BACKEND': Option(),
'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
'CHORD_PROPAGATES': Option(True, type='bool'),
'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'),
'CREATE_MISSING_QUEUES': Option(True, type='bool'),
'DEFAULT_RATE_LIMIT': Option(type='string'),
'DISABLE_RATE_LIMITS': Option(False, type='bool'),
'DEFAULT_ROUTING_KEY': Option('celery'),
'DEFAULT_QUEUE': Option('celery'),
'DEFAULT_EXCHANGE': Option('celery'),
'DEFAULT_EXCHANGE_TYPE': Option('direct'),
'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
'ENABLE_UTC': Option(True, type='bool'),
'ENABLE_REMOTE_CONTROL': Option(True, type='bool'),
'EVENT_SERIALIZER': Option('json'),
'EVENT_QUEUE_EXPIRES': Option(None, type='float'),
'EVENT_QUEUE_TTL': Option(None, type='float'),
'IMPORTS': Option((), type='tuple'),
'INCLUDE': Option((), type='tuple'),
'IGNORE_RESULT': Option(False, type='bool'),
'MAX_CACHED_RESULTS': Option(100, type='int'),
'MESSAGE_COMPRESSION': Option(type='string'),
'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
'REDIS_HOST': Option(type='string', **_REDIS_OLD),
'REDIS_PORT': Option(type='int', **_REDIS_OLD),
'REDIS_DB': Option(type='int', **_REDIS_OLD),
'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
'REDIS_MAX_CONNECTIONS': Option(type='int'),
'RESULT_BACKEND': Option(type='string'),
'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
'RESULT_DB_TABLENAMES': Option(type='dict'),
'RESULT_DBURI': Option(),
'RESULT_ENGINE_OPTIONS': Option(type='dict'),
'RESULT_EXCHANGE': Option('celeryresults'),
'RESULT_EXCHANGE_TYPE': Option('direct'),
'RESULT_SERIALIZER': Option('pickle'),
'RESULT_PERSISTENT': Option(None, type='bool'),
'ROUTES': Option(type='any'),
'SEND_EVENTS': Option(False, type='bool'),
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
'TASK_PUBLISH_RETRY_POLICY': Option({
'max_retries': 3,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}, type='dict'),
'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
'TASK_SERIALIZER': Option('pickle'),
'TIMEZONE': Option(type='string'),
'TRACK_STARTED': Option(False, type='bool'),
'REDIRECT_STDOUTS': Option(True, type='bool'),
'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
'QUEUES': Option(type='dict'),
'QUEUE_HA_POLICY': Option(None, type='string'),
'SECURITY_KEY': Option(type='string'),
'SECURITY_CERTIFICATE': Option(type='string'),
'SECURITY_CERT_STORE': Option(type='string'),
'WORKER_DIRECT': Option(False, type='bool'),
},
'CELERYD': {
'AGENT': Option(None, type='string'),
'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'),
'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'),
'CONCURRENCY': Option(0, type='int'),
'TIMER': Option(type='string'),
'TIMER_PRECISION': Option(1.0, type='float'),
'FORCE_EXECV': Option(False, type='bool'),
'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'),
'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
'LOG_COLOR': Option(type='bool'),
'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'MAX_TASKS_PER_CHILD': Option(type='int'),
'POOL': Option(DEFAULT_POOL),
'POOL_PUTLOCKS': Option(True, type='bool'),
'POOL_RESTARTS': Option(False, type='bool'),
'PREFETCH_MULTIPLIER': Option(4, type='int'),
'STATE_DB': Option(),
'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
'TASK_SOFT_TIME_LIMIT': Option(type='float'),
'TASK_TIME_LIMIT': Option(type='float'),
'WORKER_LOST_WAIT': Option(10.0, type='float')
},
'CELERYBEAT': {
'SCHEDULE': Option({}, type='dict'),
'SCHEDULER': Option('celery.beat:PersistentScheduler'),
'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
'SYNC_EVERY': Option(0, type='int'),
'MAX_LOOP_INTERVAL': Option(0, type='float'),
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
},
'CELERYMON': {
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
},
'EMAIL': {
'HOST': Option('localhost'),
'PORT': Option(25, type='int'),
'HOST_USER': Option(),
'HOST_PASSWORD': Option(),
'TIMEOUT': Option(2, type='float'),
'USE_SSL': Option(False, type='bool'),
'USE_TLS': Option(False, type='bool'),
},
'SERVER_EMAIL': Option('celery@localhost'),
'ADMINS': Option((), type='tuple'),
}


def flatten(d, ns=''):
stack = deque([(ns, d)])
while stack:
name, space = stack.popleft()
for key, value in items(space):
if isinstance(value, dict):
stack.append((name + key + '_', value))
else:
yield name + key, value
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))


def find_deprecated_settings(source):
from celery.utils import warn_deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
warn_deprecated(description='The {0!r} setting'.format(name),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative='Use the {0.alt} instead'.format(opt))
return source


@memoize(maxsize=None)
def find(name, namespace='celery'):
# - Try specified namespace first.
namespace = namespace.upper()
try:
return searchresult(
namespace, name.upper(), NAMESPACES[namespace][name.upper()],
)
except KeyError:
# - Try all the other namespaces.
for ns, keys in items(NAMESPACES):
if ns.upper() == name.upper():
return searchresult(None, ns, keys)
elif isinstance(keys, dict):
try:
return searchresult(ns, name.upper(), keys[name.upper()])
except KeyError:
pass
# - See if name is a qualname last.
return searchresult(None, name.upper(), DEFAULTS[name.upper()])
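To see how ``flatten()`` above turns the nested ``NAMESPACES`` mapping into the flat keys used for ``DEFAULTS``, here is a self-contained sketch that mirrors the same walk. No Celery import is needed; a toy dict with plain values stands in for the real namespace of ``Option`` objects.

from collections import deque

def flatten(d, ns=''):
    # Same traversal as celery.app.defaults.flatten: walk nested dicts,
    # joining the namespace and key with '_'.
    stack = deque([(ns, d)])
    while stack:
        name, space = stack.popleft()
        for key, value in space.items():
            if isinstance(value, dict):
                stack.append((name + key + '_', value))
            else:
                yield name + key, value

toy = {'BROKER': {'URL': None, 'POOL_LIMIT': 10},
       'CELERY': {'ALWAYS_EAGER': False}}
print(dict(flatten(toy)))
# {'BROKER_URL': None, 'BROKER_POOL_LIMIT': 10, 'CELERY_ALWAYS_EAGER': False}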

+ 0
- 257
thesisenv/lib/python3.6/site-packages/celery/app/log.py

# -*- coding: utf-8 -*-
"""
celery.app.log
~~~~~~~~~~~~~~

The Celery instance's logging section: ``Celery.log``.

Sets up logging for the worker and other programs,
redirects stdouts, colors log output, patches logging
related compatibility fixes, and so on.

"""
from __future__ import absolute_import

import logging
import os
import sys

from logging.handlers import WatchedFileHandler

from kombu.log import NullHandler
from kombu.utils.encoding import set_default_encoding_file

from celery import signals
from celery._state import get_current_task
from celery.five import class_property, string_t
from celery.utils import isatty, node_format
from celery.utils.log import (
get_logger, mlevel,
ColorFormatter, ensure_process_aware_logger,
LoggingProxy, get_multiprocessing_logger,
reset_multiprocessing_logger,
)
from celery.utils.term import colored

__all__ = ['TaskFormatter', 'Logging']

MP_LOG = os.environ.get('MP_LOG', False)


class TaskFormatter(ColorFormatter):

def format(self, record):
task = get_current_task()
if task and task.request:
record.__dict__.update(task_id=task.request.id,
task_name=task.name)
else:
record.__dict__.setdefault('task_name', '???')
record.__dict__.setdefault('task_id', '???')
return ColorFormatter.format(self, record)


class Logging(object):
#: The logging subsystem is only configured once per process.
#: setup_logging_subsystem sets this flag, and subsequent calls
#: will do nothing.
_setup = False

def __init__(self, app):
self.app = app
self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL)
self.format = self.app.conf.CELERYD_LOG_FORMAT
self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
self.colorize = self.app.conf.CELERYD_LOG_COLOR

def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
redirect_level='WARNING', colorize=None, hostname=None):
handled = self.setup_logging_subsystem(
loglevel, logfile, colorize=colorize, hostname=hostname,
)
if not handled:
if redirect_stdouts:
self.redirect_stdouts(redirect_level)
os.environ.update(
CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
CELERY_LOG_FILE=str(logfile) if logfile else '',
)
return handled

def redirect_stdouts(self, loglevel=None, name='celery.redirected'):
self.redirect_stdouts_to_logger(
get_logger(name), loglevel=loglevel
)
os.environ.update(
CELERY_LOG_REDIRECT='1',
CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''),
)

def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
colorize=None, hostname=None, **kwargs):
if self.already_setup:
return
if logfile and hostname:
logfile = node_format(logfile, hostname)
self.already_setup = True
loglevel = mlevel(loglevel or self.loglevel)
format = format or self.format
colorize = self.supports_color(colorize, logfile)
reset_multiprocessing_logger()
ensure_process_aware_logger()
receivers = signals.setup_logging.send(
sender=None, loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize,
)

if not receivers:
root = logging.getLogger()

if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
root.handlers = []
get_logger('celery').handlers = []
get_logger('celery.task').handlers = []
get_logger('celery.redirected').handlers = []

# Configure root logger
self._configure_logger(
root, logfile, loglevel, format, colorize, **kwargs
)

# Configure the multiprocessing logger
self._configure_logger(
get_multiprocessing_logger(),
logfile, loglevel if MP_LOG else logging.ERROR,
format, colorize, **kwargs
)

signals.after_setup_logger.send(
sender=None, logger=root,
loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize,
)

# then setup the root task logger.
self.setup_task_loggers(loglevel, logfile, colorize=colorize)

try:
stream = logging.getLogger().handlers[0].stream
except (AttributeError, IndexError):
pass
else:
set_default_encoding_file(stream)

# This is a hack for multiprocessing's fork+exec, so that
# logging before Process.run works.
logfile_name = logfile if isinstance(logfile, string_t) else ''
os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
_MP_FORK_LOGFILE_=logfile_name,
_MP_FORK_LOGFORMAT_=format)
return receivers

def _configure_logger(self, logger, logfile, loglevel,
format, colorize, **kwargs):
if logger is not None:
self.setup_handlers(logger, logfile, format,
colorize, **kwargs)
if loglevel:
logger.setLevel(loglevel)

def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
colorize=None, propagate=False, **kwargs):
"""Setup the task logger.

If `logfile` is not specified, then `sys.stderr` is used.

Will return the base task logger object.

"""
loglevel = mlevel(loglevel or self.loglevel)
format = format or self.task_format
colorize = self.supports_color(colorize, logfile)

logger = self.setup_handlers(
get_logger('celery.task'),
logfile, format, colorize,
formatter=TaskFormatter, **kwargs
)
logger.setLevel(loglevel)
# this is an int for some reason, better not question why.
logger.propagate = int(propagate)
signals.after_setup_task_logger.send(
sender=None, logger=logger,
loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize,
)
return logger

def redirect_stdouts_to_logger(self, logger, loglevel=None,
stdout=True, stderr=True):
"""Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
logging instance.

:param logger: The :class:`logging.Logger` instance to redirect to.
:param loglevel: The loglevel redirected messages will be logged as.

"""
proxy = LoggingProxy(logger, loglevel)
if stdout:
sys.stdout = proxy
if stderr:
sys.stderr = proxy
return proxy

def supports_color(self, colorize=None, logfile=None):
colorize = self.colorize if colorize is None else colorize
if self.app.IS_WINDOWS:
# Windows does not support ANSI color codes.
return False
if colorize or colorize is None:
# Only use color if there is no active log file
# and stderr is an actual terminal.
return logfile is None and isatty(sys.stderr)
return colorize

def colored(self, logfile=None, enabled=None):
return colored(enabled=self.supports_color(enabled, logfile))

def setup_handlers(self, logger, logfile, format, colorize,
formatter=ColorFormatter, **kwargs):
if self._is_configured(logger):
return logger
handler = self._detect_handler(logfile)
handler.setFormatter(formatter(format, use_color=colorize))
logger.addHandler(handler)
return logger

def _detect_handler(self, logfile=None):
"""Create log handler with either a filename, an open stream
or :const:`None` (stderr)."""
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, 'write'):
return logging.StreamHandler(logfile)
return WatchedFileHandler(logfile)

def _has_handler(self, logger):
if logger.handlers:
return any(not isinstance(h, NullHandler) for h in logger.handlers)

def _is_configured(self, logger):
return self._has_handler(logger) and not getattr(
logger, '_rudimentary_setup', False)

def setup_logger(self, name='celery', *args, **kwargs):
"""Deprecated: No longer used."""
self.setup_logging_subsystem(*args, **kwargs)
return logging.root

def get_default_logger(self, name='celery', **kwargs):
return get_logger(name)

@class_property
def already_setup(cls):
return cls._setup

@already_setup.setter # noqa
def already_setup(cls, was_setup):
cls._setup = was_setup
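``setup_logging_subsystem()`` above only configures handlers itself when the ``setup_logging`` signal has no receivers. A hedged sketch of taking over logging from application code, assuming Celery 3.1.x; the dictConfig contents are illustrative (the format string copies the default worker format):

import logging.config
from celery.signals import setup_logging

@setup_logging.connect
def configure_logging(loglevel=None, logfile=None, **kwargs):
    # Having a receiver connected means Logging.setup_logging_subsystem()
    # above skips its own root/multiprocessing handler setup.
    logging.config.dictConfig({
        'version': 1,
        'formatters': {'simple': {
            'format': '[%(asctime)s: %(levelname)s] %(message)s'}},
        'handlers': {'console': {'class': 'logging.StreamHandler',
                                 'formatter': 'simple'}},
        'root': {'handlers': ['console'], 'level': loglevel or 'INFO'},
    })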

+ 0
- 71
thesisenv/lib/python3.6/site-packages/celery/app/registry.py

# -*- coding: utf-8 -*-
"""
celery.app.registry
~~~~~~~~~~~~~~~~~~~

Registry of available tasks.

"""
from __future__ import absolute_import

import inspect

from importlib import import_module

from celery._state import get_current_app
from celery.exceptions import NotRegistered
from celery.five import items

__all__ = ['TaskRegistry']


class TaskRegistry(dict):
NotRegistered = NotRegistered

def __missing__(self, key):
raise self.NotRegistered(key)

def register(self, task):
"""Register a task in the task registry.

The task will be automatically instantiated if not already an
instance.

"""
self[task.name] = inspect.isclass(task) and task() or task

def unregister(self, name):
"""Unregister task by name.

:param name: name of the task to unregister, or a
:class:`celery.task.base.Task` with a valid `name` attribute.

:raises celery.exceptions.NotRegistered: if the task has not
been registered.

"""
try:
self.pop(getattr(name, 'name', name))
except KeyError:
raise self.NotRegistered(name)

# -- these methods are irrelevant now and will be removed in 4.0
def regular(self):
return self.filter_types('regular')

def periodic(self):
return self.filter_types('periodic')

def filter_types(self, type):
return dict((name, task) for name, task in items(self)
if getattr(task, 'type', 'regular') == type)


def _unpickle_task(name):
return get_current_app().tasks[name]


def _unpickle_task_v2(name, module=None):
if module:
import_module(module)
return get_current_app().tasks[name]
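The registry above is just a dict keyed by task name, exposed on the app as ``app.tasks`` (which triggers finalization, see ``tasks`` in base.py above). A hedged sketch, assuming Celery 3.1.x is installed; the app and task names are illustrative and the registry will also contain the built-in ``celery.*`` tasks:

from celery import Celery
from celery.exceptions import NotRegistered

app = Celery('demo')

@app.task(name='demo.ping')
def ping():
    return 'pong'

# app.tasks is the TaskRegistry defined above.
print('demo.ping' in app.tasks)     # True
print(app.tasks['demo.ping'])       # prints something like <@task: demo.ping of demo:0x...>

try:
    app.tasks['no.such.task']       # unknown names raise via __missing__()
except NotRegistered as exc:
    print('not registered:', exc)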

+ 0
- 95
thesisenv/lib/python3.6/site-packages/celery/app/routes.py

# -*- coding: utf-8 -*-
"""
celery.routes
~~~~~~~~~~~~~

Contains utilities for working with task routers,
(:setting:`CELERY_ROUTES`).

"""
from __future__ import absolute_import

from celery.exceptions import QueueNotFound
from celery.five import string_t
from celery.utils import lpmerge
from celery.utils.functional import firstmethod, mlazy
from celery.utils.imports import instantiate

__all__ = ['MapRoute', 'Router', 'prepare']

_first_route = firstmethod('route_for_task')


class MapRoute(object):
"""Creates a router out of a :class:`dict`."""

def __init__(self, map):
self.map = map

def route_for_task(self, task, *args, **kwargs):
try:
return dict(self.map[task])
except KeyError:
pass
except ValueError:
return {'queue': self.map[task]}


class Router(object):

def __init__(self, routes=None, queues=None,
create_missing=False, app=None):
self.app = app
self.queues = {} if queues is None else queues
self.routes = [] if routes is None else routes
self.create_missing = create_missing

def route(self, options, task, args=(), kwargs={}):
options = self.expand_destination(options) # expands 'queue'
if self.routes:
route = self.lookup_route(task, args, kwargs)
if route: # expands 'queue' in route.
return lpmerge(self.expand_destination(route), options)
if 'queue' not in options:
options = lpmerge(self.expand_destination(
self.app.conf.CELERY_DEFAULT_QUEUE), options)
return options

def expand_destination(self, route):
# Route can be a queue name: convenient for direct exchanges.
if isinstance(route, string_t):
queue, route = route, {}
else:
# can use defaults from configured queue, but override specific
# things (like the routing_key): great for topic exchanges.
queue = route.pop('queue', None)

if queue:
try:
Q = self.queues[queue] # noqa
except KeyError:
raise QueueNotFound(
'Queue {0!r} missing from CELERY_QUEUES'.format(queue))
# needs to be declared by publisher
route['queue'] = Q
return route

def lookup_route(self, task, args=None, kwargs=None):
return _first_route(self.routes, task, args, kwargs)


def prepare(routes):
"""Expands the :setting:`CELERY_ROUTES` setting."""

def expand_route(route):
if isinstance(route, dict):
return MapRoute(route)
if isinstance(route, string_t):
return mlazy(instantiate, route)
return route

if routes is None:
return ()
if not isinstance(routes, (list, tuple)):
routes = (routes, )
return [expand_route(route) for route in routes]
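A hedged example of the ``CELERY_ROUTES`` setting that ``prepare()`` and ``MapRoute`` above consume: a plain dict mapping task names to route dicts. The task and queue names here are illustrative only.

# settings module passed to app.config_from_object(), for example
CELERY_ROUTES = {
    # MapRoute wraps this dict; route_for_task() returns the per-task
    # options and Router.expand_destination() resolves 'queue' against
    # CELERY_QUEUES (raising QueueNotFound if it is missing there).
    'demo.tasks.send_mail': {'queue': 'mail',
                             'routing_key': 'mail.send'},
}
# Tasks without an entry fall back to CELERY_DEFAULT_QUEUE, as in
# Router.route() above.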

+ 0
- 948
thesisenv/lib/python3.6/site-packages/celery/app/task.py

# -*- coding: utf-8 -*-
"""
celery.app.task
~~~~~~~~~~~~~~~

Task Implementation: Task request context, and the base task class.

"""
from __future__ import absolute_import

import sys

from billiard.einfo import ExceptionInfo

from celery import current_app
from celery import states
from celery._state import _task_stack
from celery.canvas import signature
from celery.exceptions import MaxRetriesExceededError, Reject, Retry
from celery.five import class_property, items, with_metaclass
from celery.local import Proxy
from celery.result import EagerResult
from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
from celery.utils.functional import mattrgetter, maybe_list
from celery.utils.imports import instantiate
from celery.utils.mail import ErrorMail

from .annotations import resolve_all as resolve_all_annotations
from .registry import _unpickle_task_v2
from .utils import appstr

__all__ = ['Context', 'Task']

#: extracts attributes related to publishing a message from an object.
extract_exec_options = mattrgetter(
'queue', 'routing_key', 'exchange', 'priority', 'expires',
'serializer', 'delivery_mode', 'compression', 'time_limit',
'soft_time_limit', 'immediate', 'mandatory', # imm+man is deprecated
)

# We take __repr__ very seriously around here ;)
R_BOUND_TASK = '<class {0.__name__} of {app}{flags}>'
R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>'
R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>'
R_INSTANCE = '<@task: {0.name} of {app}{flags}>'


class _CompatShared(object):

def __init__(self, name, cons):
self.name = name
self.cons = cons

def __hash__(self):
return hash(self.name)

def __repr__(self):
return '<OldTask: %r>' % (self.name, )

def __call__(self, app):
return self.cons(app)


def _strflags(flags, default=''):
if flags:
return ' ({0})'.format(', '.join(flags))
return default


def _reprtask(task, fmt=None, flags=None):
flags = list(flags) if flags is not None else []
flags.append('v2 compatible') if task.__v2_compat__ else None
if not fmt:
fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK
return fmt.format(
task, flags=_strflags(flags),
app=appstr(task._app) if task._app else None,
)


class Context(object):
# Default context
logfile = None
loglevel = None
hostname = None
id = None
args = None
kwargs = None
retries = 0
eta = None
expires = None
is_eager = False
headers = None
delivery_info = None
reply_to = None
correlation_id = None
taskset = None # compat alias to group
group = None
chord = None
utc = None
called_directly = True
callbacks = None
errbacks = None
timelimit = None
_children = None # see property
_protected = 0

def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)

def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)

def clear(self):
return self.__dict__.clear()

def get(self, key, default=None):
return getattr(self, key, default)

def __repr__(self):
return '<Context: {0!r}>'.format(vars(self))

@property
def children(self):
# children must be an empty list for every thread
if self._children is None:
self._children = []
return self._children


class TaskType(type):
"""Meta class for tasks.

Automatically registers the task in the task registry (except
if the :attr:`Task.abstract` attribute is set).

If no :attr:`Task.name` attribute is provided, then the name is generated
from the module and class name.

"""
_creation_count = {} # used by old non-abstract task classes

def __new__(cls, name, bases, attrs):
new = super(TaskType, cls).__new__
task_module = attrs.get('__module__') or '__main__'

# - Abstract class: abstract attribute should not be inherited.
abstract = attrs.pop('abstract', None)
if abstract or not attrs.get('autoregister', True):
return new(cls, name, bases, attrs)

# The 'app' attribute is now a property, with the real app located
# in the '_app' attribute. Previously this was a regular attribute,
# so we should support classes defining it.
app = attrs.pop('_app', None) or attrs.pop('app', None)

# Attempt to inherit the app from one of the bases
if not isinstance(app, Proxy) and app is None:
for base in bases:
if getattr(base, '_app', None):
app = base._app
break
else:
app = current_app._get_current_object()
attrs['_app'] = app

# - Automatically generate missing/empty name.
task_name = attrs.get('name')
if not task_name:
attrs['name'] = task_name = gen_task_name(app, name, task_module)

if not attrs.get('_decorated'):
# non decorated tasks must also be shared in case
# an app is created multiple times due to modules
# imported under multiple names.
# Hairy stuff, here to be compatible with 2.x.
# People should not use non-abstract task classes anymore,
# use the task decorator.
from celery._state import connect_on_app_finalize
unique_name = '.'.join([task_module, name])
if unique_name not in cls._creation_count:
# the creation count is used as a safety
# so that the same task is not added recursively
# to the set of constructors.
cls._creation_count[unique_name] = 1
connect_on_app_finalize(_CompatShared(
unique_name,
lambda app: TaskType.__new__(cls, name, bases,
dict(attrs, _app=app)),
))

# - Create and register class.
# Because of the way import happens (recursively)
# this may or may not be the first time the task tries to register
# with the framework. There should only be one class for each task
# name, so we always return the registered version.
tasks = app._tasks
if task_name not in tasks:
tasks.register(new(cls, name, bases, attrs))
instance = tasks[task_name]
instance.bind(app)
return instance.__class__

def __repr__(cls):
return _reprtask(cls)


@with_metaclass(TaskType)
class Task(object):
"""Task base class.

When called tasks apply the :meth:`run` method. This method must
be defined by all tasks (that is unless the :meth:`__call__` method
is overridden).

"""
__trace__ = None
__v2_compat__ = False # set by old base in celery.task.base

ErrorMail = ErrorMail
MaxRetriesExceededError = MaxRetriesExceededError

#: Execution strategy used, or the qualified name of one.
Strategy = 'celery.worker.strategy:default'

#: This is the instance bound to if the task is a method of a class.
__self__ = None

#: The application instance associated with this task class.
_app = None

#: Name of the task.
name = None

#: If :const:`True` the task is an abstract base class.
abstract = True

#: If disabled the worker will not forward magic keyword arguments.
#: Deprecated and scheduled for removal in v4.0.
accept_magic_kwargs = False

#: Maximum number of retries before giving up. If set to :const:`None`,
#: it will **never** stop retrying.
max_retries = 3

#: Default time in seconds before a retry of the task should be
#: executed. 3 minutes by default.
default_retry_delay = 3 * 60

#: Rate limit for this task type. Examples: :const:`None` (no rate
#: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
#: a minute), `'100/h'` (hundred tasks an hour).
rate_limit = None

#: If enabled the worker will not store task state and return values
#: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT`
#: setting.
ignore_result = None

#: If enabled the request will keep track of subtasks started by
#: this task, and this information will be sent with the result
#: (``result.children``).
trail = True

#: If enabled the worker will send monitoring events related to
#: this task (but only if the worker is configured to send
#: task related events).
#: Note that this has no effect on the task-failure event case
#: where a task is not registered (as it will have no task class
#: to check this flag).
send_events = True

#: When enabled errors will be stored even if the task is otherwise
#: configured to ignore results.
store_errors_even_if_ignored = None

#: If enabled an email will be sent to :setting:`ADMINS` whenever a task
#: of this type fails.
send_error_emails = None

#: The name of a serializer that is registered with
#: :mod:`kombu.serialization.registry`. Default is `'pickle'`.
serializer = None

#: Hard time limit.
#: Defaults to the :setting:`CELERYD_TASK_TIME_LIMIT` setting.
time_limit = None

#: Soft time limit.
#: Defaults to the :setting:`CELERYD_TASK_SOFT_TIME_LIMIT` setting.
soft_time_limit = None

#: The result store backend used for this task.
backend = None

#: If disabled this task won't be registered automatically.
autoregister = True

#: If enabled the task will report its status as 'started' when the task
#: is executed by a worker. Disabled by default as the normal behaviour
#: is to not report that level of granularity. Tasks are either pending,
#: finished, or waiting to be retried.
#:
#: Having a 'started' status can be useful when there are long-running
#: tasks and there is a need to report which task is currently
#: running.
#:
#: The application default can be overridden using the
#: :setting:`CELERY_TRACK_STARTED` setting.
track_started = None

#: When enabled messages for this task will be acknowledged **after**
#: the task has been executed, and not *just before*, which is the
#: default behavior.
#:
#: Please note that this means the task may be executed twice if the
#: worker crashes mid execution (which may be acceptable for some
#: applications).
#:
#: The application default can be overridden with the
#: :setting:`CELERY_ACKS_LATE` setting.
acks_late = None

#: Tuple of expected exceptions.
#:
#: These are errors that are expected in normal operation
#: and that should not be regarded as a real error by the worker.
#: Currently this means that the state will be updated to an error
#: state, but the worker will not log the event as an error.
throws = ()

#: Default task expiry time.
expires = None

#: Some may expect a request to exist even if the task has not been
#: called. This should probably be deprecated.
_default_request = None

_exec_options = None

__bound__ = False

from_config = (
('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'),
('serializer', 'CELERY_TASK_SERIALIZER'),
('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'),
('track_started', 'CELERY_TRACK_STARTED'),
('acks_late', 'CELERY_ACKS_LATE'),
('ignore_result', 'CELERY_IGNORE_RESULT'),
('store_errors_even_if_ignored',
'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
)

_backend = None # set by backend property.

__bound__ = False

# - Tasks are lazily bound, so that configuration is not set
# - until the task is actually used

@classmethod
def bind(self, app):
was_bound, self.__bound__ = self.__bound__, True
self._app = app
conf = app.conf
self._exec_options = None # clear option cache

for attr_name, config_name in self.from_config:
if getattr(self, attr_name, None) is None:
setattr(self, attr_name, conf[config_name])
if self.accept_magic_kwargs is None:
self.accept_magic_kwargs = app.accept_magic_kwargs

# decorate with annotations from config.
if not was_bound:
self.annotate()

from celery.utils.threads import LocalStack
self.request_stack = LocalStack()

# PeriodicTask uses this to add itself to the PeriodicTask schedule.
self.on_bound(app)

return app

@classmethod
def on_bound(self, app):
"""This method can be defined to do additional actions when the
task class is bound to an app."""
pass

@classmethod
def _get_app(self):
if self._app is None:
self._app = current_app
if not self.__bound__:
# The app property's __set__ method is not called
# if Task.app is set (on the class), so must bind on use.
self.bind(self._app)
return self._app
app = class_property(_get_app, bind)

@classmethod
def annotate(self):
for d in resolve_all_annotations(self.app.annotations, self):
for key, value in items(d):
if key.startswith('@'):
self.add_around(key[1:], value)
else:
setattr(self, key, value)

@classmethod
def add_around(self, attr, around):
orig = getattr(self, attr)
if getattr(orig, '__wrapped__', None):
orig = orig.__wrapped__
meth = around(orig)
meth.__wrapped__ = orig
setattr(self, attr, meth)
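# [Editor's sketch; not part of the original file] annotate() is what applies
# the CELERY_ANNOTATIONS setting: plain keys are set as task attributes, while
# keys prefixed with '@' wrap the existing method via add_around(). The app
# and the task name 'tasks.add' below are hypothetical.
#
#     >>> from celery import Celery
#     >>> app = Celery('sketch')
#     >>> app.conf.CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
#     >>> @app.task(name='tasks.add')
#     ... def add(x, y):
#     ...     return x + y
#     >>> add.rate_limit
#     '10/s'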

def __call__(self, *args, **kwargs):
_task_stack.push(self)
self.push_request()
try:
# add self if this is a bound task
if self.__self__ is not None:
return self.run(self.__self__, *args, **kwargs)
return self.run(*args, **kwargs)
finally:
self.pop_request()
_task_stack.pop()

def __reduce__(self):
# - tasks are pickled into the name of the task only, and the receiver
# - simply grabs it from the local registry.
# - in later versions the module of the task is also included,
# - and the receiving side tries to import that module so that
# - it will work even if the task has not been registered.
mod = type(self).__module__
mod = mod if mod and mod in sys.modules else None
return (_unpickle_task_v2, (self.name, mod), None)

def run(self, *args, **kwargs):
"""The body of the task executed by workers."""
raise NotImplementedError('Tasks must define the run method.')

def start_strategy(self, app, consumer, **kwargs):
return instantiate(self.Strategy, self, app, consumer, **kwargs)

def delay(self, *args, **kwargs):
"""Star argument version of :meth:`apply_async`.

Does not support the extra options enabled by :meth:`apply_async`.

:param \*args: positional arguments passed on to the task.
:param \*\*kwargs: keyword arguments passed on to the task.

:returns :class:`celery.result.AsyncResult`:

"""
return self.apply_async(args, kwargs)

def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
link=None, link_error=None, **options):
"""Apply tasks asynchronously by sending a message.

:keyword args: The positional arguments to pass on to the
task (a :class:`list` or :class:`tuple`).

:keyword kwargs: The keyword arguments to pass on to the
task (a :class:`dict`)

:keyword countdown: Number of seconds into the future that the
task should execute. Defaults to immediate
execution.

:keyword eta: A :class:`~datetime.datetime` object describing
the absolute time and date of when the task should
be executed. May not be specified if `countdown`
is also supplied.

:keyword expires: Either a :class:`int`, describing the number of
seconds, or a :class:`~datetime.datetime` object
that describes the absolute time and date of when
the task should expire. The task will not be
executed after the expiration time.

:keyword connection: Re-use existing broker connection instead
of establishing a new one.

:keyword retry: If enabled sending of the task message will be retried
in the event of connection loss or failure. Default
is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY`
setting. Note that you need to handle the
producer/connection manually for this to work.

:keyword retry_policy: Override the retry policy used. See the
:setting:`CELERY_TASK_PUBLISH_RETRY_POLICY`
setting.

:keyword routing_key: Custom routing key used to route the task to a
worker server. If used in combination with a
``queue`` argument it is only used to specify
custom routing keys for topic exchanges.

:keyword queue: The queue to route the task to. This must be a key
present in :setting:`CELERY_QUEUES`, or
:setting:`CELERY_CREATE_MISSING_QUEUES` must be
enabled. See :ref:`guide-routing` for more
information.

:keyword exchange: Named custom exchange to send the task to.
Usually not used in combination with the ``queue``
argument.

:keyword priority: The task priority, a number between 0 and 9.
Defaults to the :attr:`priority` attribute.

:keyword serializer: A string identifying the default
serialization method to use. Can be `pickle`,
`json`, `yaml`, `msgpack` or any custom
serialization method that has been registered
with :mod:`kombu.serialization.registry`.
Defaults to the :attr:`serializer` attribute.

:keyword compression: A string identifying the compression method
to use. Can be one of ``zlib``, ``bzip2``,
or any custom compression methods registered with
:func:`kombu.compression.register`. Defaults to
the :setting:`CELERY_MESSAGE_COMPRESSION`
setting.
:keyword link: A single, or a list of tasks to apply if the
task exits successfully.
:keyword link_error: A single, or a list of tasks to apply
if an error occurs while executing the task.

:keyword producer: :class:`~@amqp.TaskProducer` instance to use.

:keyword add_to_parent: If set to True (default) and the task
is applied while executing another task, then the result
will be appended to the parent task's ``request.children``
attribute. Trailing can also be disabled by default using the
:attr:`trail` attribute.

:keyword publisher: Deprecated alias to ``producer``.

:keyword headers: Message headers to be sent in the
task (a :class:`dict`)

:rtype :class:`celery.result.AsyncResult`: if
:setting:`CELERY_ALWAYS_EAGER` is not set, otherwise
:class:`celery.result.EagerResult`.

Also supports all keyword arguments supported by
:meth:`kombu.Producer.publish`.

.. note::
If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will
be replaced by a local :func:`apply` call instead.

"""
app = self._get_app()
if app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, task_id=task_id or uuid(),
link=link, link_error=link_error, **options)
# add 'self' if this is a "task_method".
if self.__self__ is not None:
args = args if isinstance(args, tuple) else tuple(args or ())
args = (self.__self__, ) + args
return app.send_task(
self.name, args, kwargs, task_id=task_id, producer=producer,
link=link, link_error=link_error, result_cls=self.AsyncResult,
**dict(self._get_exec_options(), **options)
)
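# [Editor's sketch; not part of the original file] typical use of delay() and
# apply_async(); `app` and `add` are hypothetical, and CELERY_ALWAYS_EAGER is
# enabled so the calls run locally without a broker.
#
#     >>> from celery import Celery
#     >>> app = Celery('sketch')
#     >>> app.conf.CELERY_ALWAYS_EAGER = True
#     >>> @app.task
#     ... def add(x, y):
#     ...     return x + y
#     >>> add.delay(2, 2).get()                        # positional args only
#     4
#     >>> add.apply_async((2, 2), countdown=10).get()  # with execution options
#     4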

def subtask_from_request(self, request=None, args=None, kwargs=None,
queue=None, **extra_options):
request = self.request if request is None else request
args = request.args if args is None else args
kwargs = request.kwargs if kwargs is None else kwargs
limit_hard, limit_soft = request.timelimit or (None, None)
options = {
'task_id': request.id,
'link': request.callbacks,
'link_error': request.errbacks,
'group_id': request.group,
'chord': request.chord,
'soft_time_limit': limit_soft,
'time_limit': limit_hard,
'reply_to': request.reply_to,
'headers': request.headers,
}
options.update(
{'queue': queue} if queue else (request.delivery_info or {})
)
return self.subtask(args, kwargs, options, type=self, **extra_options)

def retry(self, args=None, kwargs=None, exc=None, throw=True,
eta=None, countdown=None, max_retries=None, **options):
"""Retry the task.

:param args: Positional arguments to retry with.
:param kwargs: Keyword arguments to retry with.
:keyword exc: Custom exception to report when the max restart
limit has been exceeded (default:
:exc:`~@MaxRetriesExceededError`).

If this argument is set and retry is called while
an exception was raised (``sys.exc_info()`` is set)
it will attempt to reraise the current exception.

If no exception was raised it will raise the ``exc``
argument provided.
:keyword countdown: Time in seconds to delay the retry for.
:keyword eta: Explicit time and date to run the retry at
(must be a :class:`~datetime.datetime` instance).
:keyword max_retries: If set, overrides the default retry limit for
this execution. Changes to this parameter do not propagate to
subsequent task retry attempts. A value of :const:`None` means
"use the default", so if you want infinite retries you would
have to set the :attr:`max_retries` attribute of the task to
:const:`None` first.
:keyword time_limit: If set, overrides the default time limit.
:keyword soft_time_limit: If set, overrides the default soft
time limit.
:keyword \*\*options: Any extra options to pass on to
:meth:`apply_async`.
:keyword throw: If this is :const:`False`, do not raise the
:exc:`~@Retry` exception,
that tells the worker to mark the task as being
retried. Note that this means the task will be
marked as failed if the task raises an exception,
or successful if it returns.

:raises celery.exceptions.Retry: To tell the worker that
the task has been re-sent for retry. This always happens,
unless the `throw` keyword argument has been explicitly set
to :const:`False`, and is considered normal operation.

**Example**

.. code-block:: python

>>> from imaginary_twitter_lib import Twitter
>>> from proj.celery import app

>>> @app.task(bind=True)
... def tweet(self, auth, message):
... twitter = Twitter(oauth=auth)
... try:
... twitter.post_status_update(message)
... except twitter.FailWhale as exc:
... # Retry in 5 minutes.
... raise self.retry(countdown=60 * 5, exc=exc)

Although the task will never return above as `retry` raises an
exception to notify the worker, we use `raise` in front of the retry
to convey that the rest of the block will not be executed.

"""
request = self.request
retries = request.retries + 1
max_retries = self.max_retries if max_retries is None else max_retries

# Not in worker or emulated by (apply/always_eager),
# so just raise the original exception.
if request.called_directly:
maybe_reraise() # raise orig stack if PyErr_Occurred
raise exc or Retry('Task can be retried', None)

if not eta and countdown is None:
countdown = self.default_retry_delay

is_eager = request.is_eager
S = self.subtask_from_request(
request, args, kwargs,
countdown=countdown, eta=eta, retries=retries,
**options
)

if max_retries is not None and retries > max_retries:
if exc:
# first try to reraise the original exception
maybe_reraise()
# or if not in an except block then raise the custom exc.
raise exc
raise self.MaxRetriesExceededError(
"Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
self.name, request.id, S.args, S.kwargs))

ret = Retry(exc=exc, when=eta or countdown)

if is_eager:
# if task was executed eagerly using apply(),
# then the retry must also be executed eagerly.
S.apply().get()
return ret

try:
S.apply_async()
except Exception as exc:
raise Reject(exc, requeue=False)
if throw:
raise ret
return ret

def apply(self, args=None, kwargs=None,
link=None, link_error=None, **options):
"""Execute this task locally, by blocking until the task returns.

:param args: positional arguments passed on to the task.
:param kwargs: keyword arguments passed on to the task.
:keyword throw: Re-raise task exceptions. Defaults to
the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`
setting.

:rtype :class:`celery.result.EagerResult`:

"""
# trace imports Task, so need to import inline.
from celery.app.trace import eager_trace_task

app = self._get_app()
args = args or ()
# add 'self' if this is a bound method.
if self.__self__ is not None:
args = (self.__self__, ) + tuple(args)
kwargs = kwargs or {}
task_id = options.get('task_id') or uuid()
retries = options.get('retries', 0)
throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS',
options.pop('throw', None))

# Make sure we get the task instance, not class.
task = app._tasks[self.name]

request = {'id': task_id,
'retries': retries,
'is_eager': True,
'logfile': options.get('logfile'),
'loglevel': options.get('loglevel', 0),
'callbacks': maybe_list(link),
'errbacks': maybe_list(link_error),
'headers': options.get('headers'),
'delivery_info': {'is_eager': True}}
if self.accept_magic_kwargs:
default_kwargs = {'task_name': task.name,
'task_id': task_id,
'task_retries': retries,
'task_is_eager': True,
'logfile': options.get('logfile'),
'loglevel': options.get('loglevel', 0),
'delivery_info': {'is_eager': True}}
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
extend_with = dict((key, val)
for key, val in items(default_kwargs)
if key in supported_keys)
kwargs.update(extend_with)

tb = None
retval, info = eager_trace_task(task, task_id, args, kwargs,
app=self._get_app(),
request=request, propagate=throw)
if isinstance(retval, ExceptionInfo):
retval, tb = retval.exception, retval.traceback
state = states.SUCCESS if info is None else info.state
return EagerResult(task_id, retval, state, traceback=tb)
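# [Editor's sketch; not part of the original file] apply() executes the task
# in the current process and wraps the outcome in an EagerResult; `add` is the
# same hypothetical task as in the sketch above.
#
#     >>> res = add.apply((2, 2))
#     >>> res.state, res.result
#     ('SUCCESS', 4)
#     >>> res.get()
#     4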

def AsyncResult(self, task_id, **kwargs):
"""Get AsyncResult instance for this kind of task.

:param task_id: Task id to get result for.

"""
return self._get_app().AsyncResult(task_id, backend=self.backend,
task_name=self.name, **kwargs)

def subtask(self, args=None, *starargs, **starkwargs):
"""Return :class:`~celery.signature` object for
this task, wrapping arguments and execution options
for a single task invocation."""
starkwargs.setdefault('app', self.app)
return signature(self, args, *starargs, **starkwargs)

def s(self, *args, **kwargs):
"""``.s(*a, **k) -> .subtask(a, k)``"""
return self.subtask(args, kwargs)

def si(self, *args, **kwargs):
"""``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
return self.subtask(args, kwargs, immutable=True)
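# [Editor's sketch; not part of the original file] s() and si() build
# signatures, the building blocks for chains, groups and chords; `add` is the
# hypothetical eager task from the sketches above.
#
#     >>> sig = add.s(2, 2)           # arguments are applied when delivered
#     >>> sig.delay().get()
#     4
#     >>> isig = add.si(2, 2)         # immutable: ignores results passed to it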

def chunks(self, it, n):
"""Creates a :class:`~celery.canvas.chunks` task for this task."""
from celery import chunks
return chunks(self.s(), it, n, app=self.app)

def map(self, it):
"""Creates a :class:`~celery.canvas.xmap` task from ``it``."""
from celery import xmap
return xmap(self.s(), it, app=self.app)

def starmap(self, it):
"""Creates a :class:`~celery.canvas.xstarmap` task from ``it``."""
from celery import xstarmap
return xstarmap(self.s(), it, app=self.app)

def send_event(self, type_, **fields):
req = self.request
with self.app.events.default_dispatcher(hostname=req.hostname) as d:
return d.send(type_, uuid=req.id, **fields)

def update_state(self, task_id=None, state=None, meta=None):
"""Update task state.

:keyword task_id: Id of the task to update, defaults to the
id of the current task.
:keyword state: New state (:class:`str`).
:keyword meta: State metadata (:class:`dict`).

"""
if task_id is None:
task_id = self.request.id
self.backend.store_result(task_id, meta, state)
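# [Editor's sketch; not part of the original file] update_state() is commonly
# used to publish custom progress states; the task, the fetch() helper and the
# 'PROGRESS' state below are hypothetical, and a real result backend is needed
# for callers to see the updates.
#
#     >>> @app.task(bind=True)
#     ... def crawl(self, urls):
#     ...     for i, url in enumerate(urls):
#     ...         fetch(url)
#     ...         self.update_state(state='PROGRESS',
#     ...                           meta={'done': i + 1, 'total': len(urls)})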

def on_success(self, retval, task_id, args, kwargs):
"""Success handler.

Run by the worker if the task executes successfully.

:param retval: The return value of the task.
:param task_id: Unique id of the executed task.
:param args: Original arguments for the executed task.
:param kwargs: Original keyword arguments for the executed task.

The return value of this handler is ignored.

"""
pass

def on_retry(self, exc, task_id, args, kwargs, einfo):
"""Retry handler.

This is run by the worker when the task is to be retried.

:param exc: The exception sent to :meth:`retry`.
:param task_id: Unique id of the retried task.
:param args: Original arguments for the retried task.
:param kwargs: Original keyword arguments for the retried task.

:keyword einfo: :class:`~billiard.einfo.ExceptionInfo`
instance, containing the traceback.

The return value of this handler is ignored.

"""
pass

def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Error handler.

This is run by the worker when the task fails.

:param exc: The exception raised by the task.
:param task_id: Unique id of the failed task.
:param args: Original arguments for the task that failed.
:param kwargs: Original keyword arguments for the task
that failed.

:keyword einfo: :class:`~billiard.einfo.ExceptionInfo`
instance, containing the traceback.

The return value of this handler is ignored.

"""
pass

def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""Handler called after the task returns.

:param status: Current task state.
:param retval: Task return value/exception.
:param task_id: Unique id of the task.
:param args: Original arguments for the task.
:param kwargs: Original keyword arguments for the task.

:keyword einfo: :class:`~billiard.einfo.ExceptionInfo`
instance, containing the traceback (if any).

The return value of this handler is ignored.

"""
pass

def send_error_email(self, context, exc, **kwargs):
if self.send_error_emails and \
not getattr(self, 'disable_error_emails', None):
self.ErrorMail(self, **kwargs).send(context, exc)

def add_trail(self, result):
if self.trail:
self.request.children.append(result)
return result

def push_request(self, *args, **kwargs):
self.request_stack.push(Context(*args, **kwargs))

def pop_request(self):
self.request_stack.pop()

def __repr__(self):
"""`repr(task)`"""
return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE)

def _get_request(self):
"""Get current request object."""
req = self.request_stack.top
if req is None:
# task was not called, but some may still expect a request
# to be there, perhaps that should be deprecated.
if self._default_request is None:
self._default_request = Context()
return self._default_request
return req
request = property(_get_request)

def _get_exec_options(self):
if self._exec_options is None:
self._exec_options = extract_exec_options(self)
return self._exec_options

@property
def backend(self):
backend = self._backend
if backend is None:
return self.app.backend
return backend

@backend.setter
def backend(self, value): # noqa
self._backend = value

@property
def __name__(self):
return self.__class__.__name__
BaseTask = Task # compat alias
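# [Editor's sketch; not part of the original file] a common pattern is to put
# shared behaviour on an abstract Task subclass and pass it as ``base=`` to
# the task decorator; the app and task below are hypothetical.
#
#     >>> class LoggingTask(Task):
#     ...     abstract = True
#     ...     def on_failure(self, exc, task_id, args, kwargs, einfo):
#     ...         print('task {0} failed: {1!r}'.format(task_id, exc))
#
#     >>> @app.task(base=LoggingTask)
#     ... def risky():
#     ...     raise RuntimeError('boom')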

+ 0
- 441
thesisenv/lib/python3.6/site-packages/celery/app/trace.py View File

# -*- coding: utf-8 -*-
"""
celery.app.trace
~~~~~~~~~~~~~~~~

This module defines how the task execution is traced:
errors are recorded, handlers are applied and so on.

"""
from __future__ import absolute_import

# ## ---
# This is the heart of the worker, the inner loop so to speak.
# It used to be split up into nice little classes and methods,
# but in the end it only resulted in bad performance and horrible tracebacks,
# so instead we now use one closure per task class.

import os
import socket
import sys

from warnings import warn

from billiard.einfo import ExceptionInfo
from kombu.exceptions import EncodeError
from kombu.utils import kwdict

from celery import current_app, group
from celery import states, signals
from celery._state import _task_stack
from celery.app import set_default_app
from celery.app.task import Task as BaseTask, Context
from celery.exceptions import Ignore, Reject, Retry
from celery.utils.log import get_logger
from celery.utils.objects import mro_lookup
from celery.utils.serialization import (
get_pickleable_exception,
get_pickleable_etype,
)

__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
'setup_worker_optimizations', 'reset_worker_optimizations']

_logger = get_logger(__name__)

send_prerun = signals.task_prerun.send
send_postrun = signals.task_postrun.send
send_success = signals.task_success.send
STARTED = states.STARTED
SUCCESS = states.SUCCESS
IGNORED = states.IGNORED
REJECTED = states.REJECTED
RETRY = states.RETRY
FAILURE = states.FAILURE
EXCEPTION_STATES = states.EXCEPTION_STATES
IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])

#: set by :func:`setup_worker_optimizations`
_tasks = None
_patched = {}


def task_has_custom(task, attr):
"""Return true if the task or one of its bases
defines ``attr`` (excluding the one in BaseTask)."""
return mro_lookup(task.__class__, attr, stop=(BaseTask, object),
monkey_patched=['celery.app.task'])


class TraceInfo(object):
__slots__ = ('state', 'retval')

def __init__(self, state, retval=None):
self.state = state
self.retval = retval

def handle_error_state(self, task, eager=False):
store_errors = not eager
if task.ignore_result:
store_errors = task.store_errors_even_if_ignored

return {
RETRY: self.handle_retry,
FAILURE: self.handle_failure,
}[self.state](task, store_errors=store_errors)

def handle_retry(self, task, store_errors=True):
"""Handle retry exception."""
# the exception raised is the Retry semi-predicate,
# and its ``exc`` attribute is the original exception raised (if any).
req = task.request
type_, _, tb = sys.exc_info()
try:
reason = self.retval
einfo = ExceptionInfo((type_, reason, tb))
if store_errors:
task.backend.mark_as_retry(
req.id, reason.exc, einfo.traceback, request=req,
)
task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
signals.task_retry.send(sender=task, request=req,
reason=reason, einfo=einfo)
return einfo
finally:
del(tb)

def handle_failure(self, task, store_errors=True):
"""Handle exception."""
req = task.request
type_, _, tb = sys.exc_info()
try:
exc = self.retval
einfo = ExceptionInfo()
einfo.exception = get_pickleable_exception(einfo.exception)
einfo.type = get_pickleable_etype(einfo.type)
if store_errors:
task.backend.mark_as_failure(
req.id, exc, einfo.traceback, request=req,
)
task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
signals.task_failure.send(sender=task, task_id=req.id,
exception=exc, args=req.args,
kwargs=req.kwargs,
traceback=tb,
einfo=einfo)
return einfo
finally:
del(tb)


def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
Info=TraceInfo, eager=False, propagate=False, app=None,
IGNORE_STATES=IGNORE_STATES):
"""Return a function that traces task execution; catches all
exceptions and updates result backend with the state and result

If the call was successful, it saves the result to the task result
backend, and sets the task status to `"SUCCESS"`.

If the call raises :exc:`~@Retry`, it extracts
the original exception, uses that as the result and sets the task state
to `"RETRY"`.

If the call results in an exception, it saves the exception as the task
result, and sets the task state to `"FAILURE"`.

Return a function that takes the following arguments:

:param uuid: The id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:keyword request: Request dict.

"""
# If the task doesn't define a custom __call__ method
# we optimize it away by simply calling the run method directly,
# saving the extra method call and a line less in the stack trace.
fun = task if task_has_custom(task, '__call__') else task.run

loader = loader or app.loader
backend = task.backend
ignore_result = task.ignore_result
track_started = task.track_started
track_started = not eager and (task.track_started and not ignore_result)
publish_result = not eager and not ignore_result
hostname = hostname or socket.gethostname()

loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup

task_on_success = None
task_after_return = None
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
task_after_return = task.after_return

store_result = backend.store_result
backend_cleanup = backend.process_cleanup

pid = os.getpid()

request_stack = task.request_stack
push_request = request_stack.push
pop_request = request_stack.pop
push_task = _task_stack.push
pop_task = _task_stack.pop
on_chord_part_return = backend.on_chord_part_return

prerun_receivers = signals.task_prerun.receivers
postrun_receivers = signals.task_postrun.receivers
success_receivers = signals.task_success.receivers

from celery import canvas
signature = canvas.maybe_signature # maybe_signature does not clone if already a signature

def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True):
if propagate:
raise
I = Info(state, exc)
R = I.handle_error_state(task, eager=eager)
if call_errbacks:
group(
[signature(errback, app=app)
for errback in request.errbacks or []], app=app,
).apply_async((uuid, ))
return I, R, I.state, I.retval

def trace_task(uuid, args, kwargs, request=None):
# R - is the possibly prepared return value.
# I - is the Info object.
# retval - is the always unmodified return value.
# state - is the resulting task state.

# This function is very long because we have unrolled all the calls
# for performance reasons, and because the function is so long
# we want the main variables (I and R) to stand out visually from
# the rest of the variables, so breaking PEP8 is worth it ;)
R = I = retval = state = None
kwargs = kwdict(kwargs)
try:
push_task(task)
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
push_request(task_request)
try:
# -*- PRE -*-
if prerun_receivers:
send_prerun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs)
loader_task_init(uuid, task)
if track_started:
store_result(
uuid, {'pid': pid, 'hostname': hostname}, STARTED,
request=task_request,
)

# -*- TRACE -*-
try:
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
except Ignore as exc:
I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
except Retry as exc:
I, R, state, retval = on_error(
task_request, exc, uuid, RETRY, call_errbacks=False,
)
except Exception as exc:
I, R, state, retval = on_error(task_request, exc, uuid)
except BaseException as exc:
raise
else:
try:
# callback tasks must be applied before the result is
# stored, so that result.children is populated.

# groups are called inline and will store the trail
# separately, so we need to call them separately
# so that the trail is not added multiple times :(
# (Issue #1936)
callbacks = task.request.callbacks
if callbacks:
if len(task.request.callbacks) > 1:
sigs, groups = [], []
for sig in callbacks:
sig = signature(sig, app=app)
if isinstance(sig, group):
groups.append(sig)
else:
sigs.append(sig)
for group_ in groups:
group_.apply_async((retval, ))
if sigs:
group(sigs).apply_async((retval, ))
else:
signature(callbacks[0], app=app).delay(retval)
if publish_result:
store_result(
uuid, retval, SUCCESS, request=task_request,
)
except EncodeError as exc:
I, R, state, retval = on_error(task_request, exc, uuid)
else:
if task_on_success:
task_on_success(retval, uuid, args, kwargs)
if success_receivers:
send_success(sender=task, result=retval)

# -* POST *-
if state not in IGNORE_STATES:
if task_request.chord:
on_chord_part_return(task, state, R)
if task_after_return:
task_after_return(
state, retval, uuid, args, kwargs, None,
)
finally:
try:
if postrun_receivers:
send_postrun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs,
retval=retval, state=state)
finally:
pop_task()
pop_request()
if not eager:
try:
backend_cleanup()
loader_cleanup()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as exc:
_logger.error('Process cleanup failed: %r', exc,
exc_info=True)
except MemoryError:
raise
except Exception as exc:
if eager:
raise
R = report_internal_error(task, exc)
return R, I

return trace_task


def trace_task(task, uuid, args, kwargs, request={}, **opts):
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
return task.__trace__(uuid, args, kwargs, request)[0]
except Exception as exc:
return report_internal_error(task, exc)


def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
app = app or current_app
return trace_task(app.tasks[name],
uuid, args, kwargs, request, app=app, **opts)
trace_task_ret = _trace_task_ret


def _fast_trace_task(task, uuid, args, kwargs, request={}):
# setup_worker_optimizations will point trace_task_ret to here,
# so this is the function used in the worker.
return _tasks[task].__trace__(uuid, args, kwargs, request)[0]


def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
opts.setdefault('eager', True)
return build_tracer(task.name, task, **opts)(
uuid, args, kwargs, request)
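# [Editor's sketch; not part of the original file] the worker builds one
# tracer closure per task and caches it on ``task.__trace__``; a rough
# equivalent of the eager path above, using the hypothetical task `add`
# bound to a hypothetical app `app`:
#
#     >>> tracer = build_tracer(add.name, add, app=app, eager=True)
#     >>> retval, info = tracer('hypothetical-task-id', (2, 2), {})
#     >>> retval
#     4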


def report_internal_error(task, exc):
_type, _value, _tb = sys.exc_info()
try:
_value = task.backend.prepare_exception(exc, 'pickle')
exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
warn(RuntimeWarning(
'Exception raised outside body: {0!r}:\n{1}'.format(
exc, exc_info.traceback)))
return exc_info
finally:
del(_tb)


def setup_worker_optimizations(app):
global _tasks
global trace_task_ret

# make sure custom Task.__call__ methods that call super
# will not mess up the request/task stack.
_install_stack_protection()

# all new threads start without a current app, so if an app is not
# passed on to the thread it will fall back to the "default app",
# which then could be the wrong app. So for the worker
# we set this to always return our app. This is a hack,
# and means that only a single app can be used for workers
# running in the same process.
app.set_current()
set_default_app(app)

# evaluate all task classes by finalizing the app.
app.finalize()

# set fast shortcut to task registry
_tasks = app._tasks

trace_task_ret = _fast_trace_task
from celery.worker import job as job_module
job_module.trace_task_ret = _fast_trace_task
job_module.__optimize__()


def reset_worker_optimizations():
global trace_task_ret
trace_task_ret = _trace_task_ret
try:
delattr(BaseTask, '_stackprotected')
except AttributeError:
pass
try:
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
from celery.worker import job as job_module
job_module.trace_task_ret = _trace_task_ret


def _install_stack_protection():
# Patches BaseTask.__call__ in the worker to handle the edge case
# where people override it and also call super.
#
# - The worker optimizes away BaseTask.__call__ and instead
# calls task.run directly.
# - so with the addition of current_task and the request stack
# BaseTask.__call__ now pushes to those stacks so that
# they work when tasks are called directly.
#
# The worker only optimizes away __call__ in the case
# where it has not been overridden, so the request/task stack
# will blow if a custom task class defines __call__ and also
# calls super().
if not getattr(BaseTask, '_stackprotected', False):
_patched['BaseTask.__call__'] = orig = BaseTask.__call__

def __protected_call__(self, *args, **kwargs):
stack = self.request_stack
req = stack.top
if req and not req._protected and \
len(stack) == 1 and not req.called_directly:
req._protected = 1
return self.run(*args, **kwargs)
return orig(self, *args, **kwargs)
BaseTask.__call__ = __protected_call__
BaseTask._stackprotected = True

+ 0
- 266
thesisenv/lib/python3.6/site-packages/celery/app/utils.py View File

# -*- coding: utf-8 -*-
"""
celery.app.utils
~~~~~~~~~~~~~~~~

App utilities: Compat settings, bugreport tool, pickling apps.

"""
from __future__ import absolute_import

import os
import platform as _platform
import re

from collections import Mapping
from types import ModuleType

from kombu.utils.url import maybe_sanitize_url

from celery.datastructures import ConfigurationView
from celery.five import items, string_t, values
from celery.platforms import pyimplementation
from celery.utils.text import pretty
from celery.utils.imports import import_from_cwd, symbol_by_name, qualname

from .defaults import find

__all__ = ['Settings', 'appstr', 'bugreport',
'filter_hidden_settings', 'find_app']

#: Format used to generate bugreport information.
BUGREPORT_INFO = """
software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
billiard:{billiard_v} {driver_v}
platform -> system:{system} arch:{arch} imp:{py_i}
loader -> {loader}
settings -> transport:{transport} results:{results}

{human_settings}
"""

HIDDEN_SETTINGS = re.compile(
'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE',
re.IGNORECASE,
)


def appstr(app):
"""String used in __repr__ etc, to id app instances."""
return '{0}:0x{1:x}'.format(app.main or '__main__', id(app))


class Settings(ConfigurationView):
"""Celery settings object.

.. seealso::

:ref:`configuration` for a full list of configuration keys.

"""

@property
def CELERY_RESULT_BACKEND(self):
return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')

@property
def BROKER_TRANSPORT(self):
return self.first('BROKER_TRANSPORT',
'BROKER_BACKEND', 'CARROT_BACKEND')

@property
def BROKER_BACKEND(self):
"""Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
return self.BROKER_TRANSPORT

@property
def BROKER_URL(self):
return (os.environ.get('CELERY_BROKER_URL') or
self.first('BROKER_URL', 'BROKER_HOST'))

@property
def CELERY_TIMEZONE(self):
# this way we also support django's time zone.
return self.first('CELERY_TIMEZONE', 'TIME_ZONE')

def without_defaults(self):
"""Return the current configuration, but without defaults."""
# the last stash is the default settings, so just skip that
return Settings({}, self._order[:-1])

def value_set_for(self, key):
return key in self.without_defaults()

def find_option(self, name, namespace='celery'):
"""Search for option by name.

Will return ``(namespace, key, type)`` tuple, e.g.::

>>> from proj.celery import app
>>> app.conf.find_option('disable_rate_limits')
('CELERY', 'DISABLE_RATE_LIMITS',
<Option: type->bool default->False>)

:param name: Name of option, cannot be partial.
:keyword namespace: Preferred namespace (``CELERY`` by default).

"""
return find(name, namespace)

def find_value_for_key(self, name, namespace='celery'):
"""Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
return self.get_by_parts(*self.find_option(name, namespace)[:-1])

def get_by_parts(self, *parts):
"""Return the current value for setting specified as a path.

Example::

>>> from proj.celery import app
>>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS')
False

"""
return self['_'.join(part for part in parts if part)]

def table(self, with_defaults=False, censored=True):
filt = filter_hidden_settings if censored else lambda v: v
return filt(dict(
(k, v) for k, v in items(
self if with_defaults else self.without_defaults())
if k.isupper() and not k.startswith('_')
))

def humanize(self, with_defaults=False, censored=True):
"""Return a human readable string showing changes to the
configuration."""
return '\n'.join(
'{0}: {1}'.format(key, pretty(value, width=50))
for key, value in items(self.table(with_defaults, censored)))
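# [Editor's sketch; not part of the original file] humanize() renders only the
# settings that differ from the defaults, one ``KEY: value`` pair per line;
# `app` is a hypothetical application instance.
#
#     >>> app.conf.CELERY_TASK_SERIALIZER = 'json'
#     >>> print(app.conf.humanize())
#     CELERY_TASK_SERIALIZER: json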


class AppPickler(object):
"""Old application pickler/unpickler (< 3.1)."""

def __call__(self, cls, *args):
kwargs = self.build_kwargs(*args)
app = self.construct(cls, **kwargs)
self.prepare(app, **kwargs)
return app

def prepare(self, app, **kwargs):
app.conf.update(kwargs['changes'])

def build_kwargs(self, *args):
return self.build_standard_kwargs(*args)

def build_standard_kwargs(self, main, changes, loader, backend, amqp,
events, log, control, accept_magic_kwargs,
config_source=None):
return dict(main=main, loader=loader, backend=backend, amqp=amqp,
changes=changes, events=events, log=log, control=control,
set_as_current=False,
accept_magic_kwargs=accept_magic_kwargs,
config_source=config_source)

def construct(self, cls, **kwargs):
return cls(**kwargs)


def _unpickle_app(cls, pickler, *args):
"""Rebuild app for versions 2.5+"""
return pickler()(cls, *args)


def _unpickle_app_v2(cls, kwargs):
"""Rebuild app for versions 3.1+"""
kwargs['set_as_current'] = False
return cls(**kwargs)


def filter_hidden_settings(conf):

def maybe_censor(key, value, mask='*' * 8):
if isinstance(value, Mapping):
return filter_hidden_settings(value)
if isinstance(key, string_t):
if HIDDEN_SETTINGS.search(key):
return mask
elif 'BROKER_URL' in key.upper():
from kombu import Connection
return Connection(value).as_uri(mask=mask)
elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'):
return maybe_sanitize_url(value, mask=mask)

return value

return dict((k, maybe_censor(k, v)) for k, v in items(conf))
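# [Editor's sketch; not part of the original file] any key matching
# HIDDEN_SETTINGS (API keys, tokens, passwords, ...) is replaced by a mask
# before settings are shown in bug reports; the keys below are hypothetical.
#
#     >>> conf = {'AWS_SECRET_ACCESS_KEY': 'abc123',
#     ...         'CELERY_TIMEZONE': 'Europe/Berlin'}
#     >>> filter_hidden_settings(conf)['AWS_SECRET_ACCESS_KEY']
#     '********'
#     >>> filter_hidden_settings(conf)['CELERY_TIMEZONE']
#     'Europe/Berlin'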


def bugreport(app):
"""Return a string containing information useful in bug reports."""
import billiard
import celery
import kombu

try:
conn = app.connection()
driver_v = '{0}:{1}'.format(conn.transport.driver_name,
conn.transport.driver_version())
transport = conn.transport_cls
except Exception:
transport = driver_v = ''

return BUGREPORT_INFO.format(
system=_platform.system(),
arch=', '.join(x for x in _platform.architecture() if x),
py_i=pyimplementation(),
celery_v=celery.VERSION_BANNER,
kombu_v=kombu.__version__,
billiard_v=billiard.__version__,
py_v=_platform.python_version(),
driver_v=driver_v,
transport=transport,
results=maybe_sanitize_url(
app.conf.CELERY_RESULT_BACKEND or 'disabled'),
human_settings=app.conf.humanize(),
loader=qualname(app.loader.__class__),
)


def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
from .base import Celery

try:
sym = symbol_by_name(app, imp=imp)
except AttributeError:
# last part was not an attribute, but a module
sym = imp(app)
if isinstance(sym, ModuleType) and ':' not in app:
try:
found = sym.app
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
try:
found = sym.celery
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
if getattr(sym, '__path__', None):
try:
return find_app(
'{0}.celery'.format(app),
symbol_by_name=symbol_by_name, imp=imp,
)
except ImportError:
pass
for suspect in values(vars(sym)):
if isinstance(suspect, Celery):
return suspect
raise
else:
return found
else:
return found
return sym
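# [Editor's sketch; not part of the original file] find_app() implements the
# lookup behind ``celery -A proj``: it accepts 'proj', 'proj.celery' or
# 'proj.celery:app', and otherwise scans the module for a Celery instance;
# the 'proj' package below is hypothetical.
#
#     >>> find_app('proj')            # tries proj.app, proj.celery, ...
#     <Celery proj:0x...>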

+ 0
- 0
thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py View File


+ 0
- 151
thesisenv/lib/python3.6/site-packages/celery/apps/beat.py View File

# -*- coding: utf-8 -*-
"""
celery.apps.beat
~~~~~~~~~~~~~~~~

This module is the 'program-version' of :mod:`celery.beat`.

It does everything necessary to run that module
as an actual application, like installing signal handlers
and so on.

"""
from __future__ import absolute_import, unicode_literals

import numbers
import socket
import sys

from celery import VERSION_BANNER, platforms, beat
from celery.utils.imports import qualname
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.timeutils import humanize_seconds

__all__ = ['Beat']

STARTUP_INFO_FMT = """
Configuration ->
. broker -> {conninfo}
. loader -> {loader}
. scheduler -> {scheduler}
{scheduler_info}
. logfile -> {logfile}@%{loglevel}
. maxinterval -> {hmax_interval} ({max_interval}s)
""".strip()

logger = get_logger('celery.beat')


class Beat(object):
Service = beat.Service
app = None

def __init__(self, max_interval=None, app=None,
socket_timeout=30, pidfile=None, no_color=None,
loglevel=None, logfile=None, schedule=None,
scheduler_cls=None, redirect_stdouts=None,
redirect_stdouts_level=None, **kwargs):
"""Starts the beat task scheduler."""
self.app = app = app or self.app
self.loglevel = self._getopt('log_level', loglevel)
self.logfile = self._getopt('log_file', logfile)
self.schedule = self._getopt('schedule_filename', schedule)
self.scheduler_cls = self._getopt('scheduler', scheduler_cls)
self.redirect_stdouts = self._getopt(
'redirect_stdouts', redirect_stdouts,
)
self.redirect_stdouts_level = self._getopt(
'redirect_stdouts_level', redirect_stdouts_level,
)

self.max_interval = max_interval
self.socket_timeout = socket_timeout
self.no_color = no_color
self.colored = app.log.colored(
self.logfile,
enabled=not no_color if no_color is not None else no_color,
)
self.pidfile = pidfile

if not isinstance(self.loglevel, numbers.Integral):
self.loglevel = LOG_LEVELS[self.loglevel.upper()]

def _getopt(self, key, value):
if value is not None:
return value
return self.app.conf.find_value_for_key(key, namespace='celerybeat')

def run(self):
print(str(self.colored.cyan(
'celery beat v{0} is starting.'.format(VERSION_BANNER))))
self.init_loader()
self.set_process_title()
self.start_scheduler()

def setup_logging(self, colorize=None):
if colorize is None and self.no_color is not None:
colorize = not self.no_color
self.app.log.setup(self.loglevel, self.logfile,
self.redirect_stdouts, self.redirect_stdouts_level,
colorize=colorize)

def start_scheduler(self):
c = self.colored
if self.pidfile:
platforms.create_pidlock(self.pidfile)
beat = self.Service(app=self.app,
max_interval=self.max_interval,
scheduler_cls=self.scheduler_cls,
schedule_filename=self.schedule)

print(str(c.blue('__ ', c.magenta('-'),
c.blue(' ... __ '), c.magenta('-'),
c.blue(' _\n'),
c.reset(self.startup_info(beat)))))
self.setup_logging()
if self.socket_timeout:
logger.debug('Setting default socket timeout to %r',
self.socket_timeout)
socket.setdefaulttimeout(self.socket_timeout)
try:
self.install_sync_handler(beat)
beat.start()
except Exception as exc:
logger.critical('beat raised exception %s: %r',
exc.__class__, exc,
exc_info=True)

def init_loader(self):
# Run the worker init handler.
# (Usually imports task modules and such.)
self.app.loader.init_worker()
self.app.finalize()

def startup_info(self, beat):
scheduler = beat.get_scheduler(lazy=True)
return STARTUP_INFO_FMT.format(
conninfo=self.app.connection().as_uri(),
logfile=self.logfile or '[stderr]',
loglevel=LOG_LEVELS[self.loglevel],
loader=qualname(self.app.loader),
scheduler=qualname(scheduler),
scheduler_info=scheduler.info,
hmax_interval=humanize_seconds(beat.max_interval),
max_interval=beat.max_interval,
)

def set_process_title(self):
arg_start = 'manage' in sys.argv[0] and 2 or 1
platforms.set_process_title(
'celery beat', info=' '.join(sys.argv[arg_start:]),
)

def install_sync_handler(self, beat):
"""Install a `SIGTERM` + `SIGINT` handler that saves
the beat schedule."""

def _sync(signum, frame):
beat.sync()
raise SystemExit()

platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)

+ 0
- 372
thesisenv/lib/python3.6/site-packages/celery/apps/worker.py View File

# -*- coding: utf-8 -*-
"""
celery.apps.worker
~~~~~~~~~~~~~~~~~~

This module is the 'program-version' of :mod:`celery.worker`.

It does everything necessary to run that module
as an actual application, like installing signal handlers,
platform tweaks, and so on.

"""
from __future__ import absolute_import, print_function, unicode_literals

import logging
import os
import platform as _platform
import sys
import warnings

from functools import partial

from billiard import current_process
from kombu.utils.encoding import safe_str

from celery import VERSION_BANNER, platforms, signals
from celery.app import trace
from celery.exceptions import (
CDeprecationWarning, WorkerShutdown, WorkerTerminate,
)
from celery.five import string, string_t
from celery.loaders.app import AppLoader
from celery.platforms import check_privileges
from celery.utils import cry, isatty
from celery.utils.imports import qualname
from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
from celery.utils.text import pluralize
from celery.worker import WorkController

__all__ = ['Worker']

logger = get_logger(__name__)
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')

W_PICKLE_DEPRECATED = """
Starting from version 3.2 Celery will refuse to accept pickle by default.

The pickle serializer is a security concern as it may give attackers
the ability to execute any command. It's important to secure
your broker from unauthorized access when using pickle, so we think
that enabling pickle should require a deliberate action and not be
the default choice.

If you depend on pickle then you should set a setting to disable this
warning and to be sure that everything will continue working
when you upgrade to Celery 3.2::

CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']

You must only enable the serializers that you will actually use.

"""


def active_thread_count():
from threading import enumerate
return sum(1 for t in enumerate()
if not t.name.startswith('Dummy-'))


def safe_say(msg):
print('\n{0}'.format(msg), file=sys.__stderr__)

ARTLINES = [
' --------------',
'---- **** -----',
'--- * *** * --',
'-- * - **** ---',
'- ** ----------',
'- ** ----------',
'- ** ----------',
'- ** ----------',
'- *** --- * ---',
'-- ******* ----',
'--- ***** -----',
' --------------',
]

BANNER = """\
{hostname} v{version}

{platform}

[config]
.> app: {app}
.> transport: {conninfo}
.> results: {results}
.> concurrency: {concurrency}

[queues]
{queues}
"""

EXTRA_INFO_FMT = """
[tasks]
{tasks}
"""


class Worker(WorkController):

def on_before_init(self, **kwargs):
trace.setup_worker_optimizations(self.app)

# this signal can be used to set up configuration for
# workers by name.
signals.celeryd_init.send(
sender=self.hostname, instance=self,
conf=self.app.conf, options=kwargs,
)
check_privileges(self.app.conf.CELERY_ACCEPT_CONTENT)

def on_after_init(self, purge=False, no_color=None,
redirect_stdouts=None, redirect_stdouts_level=None,
**kwargs):
self.redirect_stdouts = self._getopt(
'redirect_stdouts', redirect_stdouts,
)
self.redirect_stdouts_level = self._getopt(
'redirect_stdouts_level', redirect_stdouts_level,
)
super(Worker, self).setup_defaults(**kwargs)
self.purge = purge
self.no_color = no_color
self._isatty = isatty(sys.stdout)
self.colored = self.app.log.colored(
self.logfile,
enabled=not no_color if no_color is not None else no_color
)

def on_init_blueprint(self):
self._custom_logging = self.setup_logging()
# apply task execution optimizations
# -- This will finalize the app!
trace.setup_worker_optimizations(self.app)

def on_start(self):
if not self._custom_logging and self.redirect_stdouts:
self.app.log.redirect_stdouts(self.redirect_stdouts_level)

WorkController.on_start(self)

# this signal can be used to e.g. change queues after
# the -Q option has been applied.
signals.celeryd_after_setup.send(
sender=self.hostname, instance=self, conf=self.app.conf,
)

if not self.app.conf.value_set_for('CELERY_ACCEPT_CONTENT'):
warnings.warn(CDeprecationWarning(W_PICKLE_DEPRECATED))

if self.purge:
self.purge_messages()

# Dump configuration to screen so we have some basic information
# for when users send bug reports.
print(safe_str(''.join([
string(self.colored.cyan(' \n', self.startup_info())),
string(self.colored.reset(self.extra_info() or '')),
])), file=sys.__stdout__)
self.set_process_status('-active-')
self.install_platform_tweaks(self)

def on_consumer_ready(self, consumer):
signals.worker_ready.send(sender=consumer)
print('{0} ready.'.format(safe_str(self.hostname), ))

def setup_logging(self, colorize=None):
if colorize is None and self.no_color is not None:
colorize = not self.no_color
return self.app.log.setup(
self.loglevel, self.logfile,
redirect_stdouts=False, colorize=colorize, hostname=self.hostname,
)

def purge_messages(self):
count = self.app.control.purge()
if count:
print('purge: Erased {0} {1} from the queue.\n'.format(
count, pluralize(count, 'message')))

def tasklist(self, include_builtins=True, sep='\n', int_='celery.'):
return sep.join(
' . {0}'.format(task) for task in sorted(self.app.tasks)
if (not task.startswith(int_) if not include_builtins else task)
)

def extra_info(self):
if self.loglevel <= logging.INFO:
include_builtins = self.loglevel <= logging.DEBUG
tasklist = self.tasklist(include_builtins=include_builtins)
return EXTRA_INFO_FMT.format(tasks=tasklist)

def startup_info(self):
app = self.app
concurrency = string(self.concurrency)
appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app))
if not isinstance(app.loader, AppLoader):
loader = qualname(app.loader)
if loader.startswith('celery.loaders'):
loader = loader[14:]
appr += ' ({0})'.format(loader)
if self.autoscale:
max, min = self.autoscale
concurrency = '{{min={0}, max={1}}}'.format(min, max)
pool = self.pool_cls
if not isinstance(pool, string_t):
pool = pool.__module__
concurrency += ' ({0})'.format(pool.split('.')[-1])
events = 'ON'
if not self.send_events:
events = 'OFF (enable -E to monitor this worker)'

banner = BANNER.format(
app=appr,
hostname=safe_str(self.hostname),
version=VERSION_BANNER,
conninfo=self.app.connection().as_uri(),
results=self.app.backend.as_uri(),
concurrency=concurrency,
platform=safe_str(_platform.platform()),
events=events,
queues=app.amqp.queues.format(indent=0, indent_first=False),
).splitlines()

# integrate the ASCII art.
for i, x in enumerate(banner):
try:
banner[i] = ' '.join([ARTLINES[i], banner[i]])
except IndexError:
banner[i] = ' ' * 16 + banner[i]
return '\n'.join(banner) + '\n'

def install_platform_tweaks(self, worker):
"""Install platform specific tweaks and workarounds."""
if self.app.IS_OSX:
self.osx_proxy_detection_workaround()

# Install signal handler so SIGHUP restarts the worker.
if not self._isatty:
# only install HUP handler if detached from terminal,
# so closing the terminal window doesn't restart the worker
# into the background.
if self.app.IS_OSX:
# OS X can't exec from a process using threads.
# See http://github.com/celery/celery/issues#issue/152
install_HUP_not_supported_handler(worker)
else:
install_worker_restart_handler(worker)
install_worker_term_handler(worker)
install_worker_term_hard_handler(worker)
install_worker_int_handler(worker)
install_cry_handler()
install_rdb_handler()

def osx_proxy_detection_workaround(self):
"""See http://github.com/celery/celery/issues#issue/161"""
os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')

def set_process_status(self, info):
return platforms.set_mp_process_title(
'celeryd',
info='{0} ({1})'.format(info, platforms.strargv(sys.argv)),
hostname=self.hostname,
)


def _shutdown_handler(worker, sig='TERM', how='Warm',
exc=WorkerShutdown, callback=None):

def _handle_request(*args):
with in_sighandler():
from celery.worker import state
if current_process()._name == 'MainProcess':
if callback:
callback(worker)
safe_say('worker: {0} shutdown (MainProcess)'.format(how))
if active_thread_count() > 1:
setattr(state, {'Warm': 'should_stop',
'Cold': 'should_terminate'}[how], True)
else:
raise exc()
_handle_request.__name__ = str('worker_{0}'.format(how))
platforms.signals[sig] = _handle_request
install_worker_term_handler = partial(
_shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown,
)
if not is_jython: # pragma: no cover
install_worker_term_hard_handler = partial(
_shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate,
)
else: # pragma: no cover
install_worker_term_handler = \
install_worker_term_hard_handler = lambda *a, **kw: None


def on_SIGINT(worker):
safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!')
install_worker_term_hard_handler(worker, sig='SIGINT')
if not is_jython: # pragma: no cover
install_worker_int_handler = partial(
_shutdown_handler, sig='SIGINT', callback=on_SIGINT
)
else: # pragma: no cover
def install_worker_int_handler(*a, **kw):
pass


def _reload_current_worker():
platforms.close_open_fds([
sys.__stdin__, sys.__stdout__, sys.__stderr__,
])
os.execv(sys.executable, [sys.executable] + sys.argv)


def install_worker_restart_handler(worker, sig='SIGHUP'):

def restart_worker_sig_handler(*args):
"""Signal handler restarting the current python program."""
set_in_sighandler(True)
safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv)))
import atexit
atexit.register(_reload_current_worker)
from celery.worker import state
state.should_stop = True
platforms.signals[sig] = restart_worker_sig_handler


def install_cry_handler(sig='SIGUSR1'):
# Jython/PyPy does not have sys._current_frames
if is_jython or is_pypy: # pragma: no cover
return

def cry_handler(*args):
"""Signal handler logging the stacktrace of all active threads."""
with in_sighandler():
safe_say(cry())
platforms.signals[sig] = cry_handler


def install_rdb_handler(envvar='CELERY_RDBSIG',
sig='SIGUSR2'): # pragma: no cover

def rdb_handler(*args):
"""Signal handler setting a rdb breakpoint at the current frame."""
with in_sighandler():
from celery.contrib.rdb import set_trace, _frame
# gevent does not pass standard signal handler args
frame = args[1] if args else _frame().f_back
set_trace(frame)
if os.environ.get(envvar):
platforms.signals[sig] = rdb_handler


def install_HUP_not_supported_handler(worker, sig='SIGHUP'):

def warn_on_HUP_handler(signum, frame):
with in_sighandler():
safe_say('{sig} not supported: Restarting with {sig} is '
'unstable on this platform!'.format(sig=sig))
platforms.signals[sig] = warn_on_HUP_handler

+ 0
- 68
thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py View File

# -*- coding: utf-8 -*-
"""
celery.backends
~~~~~~~~~~~~~~~

Backend abstract factory (...did I just say that?) and alias definitions.

"""
from __future__ import absolute_import

import sys
import types

from celery.exceptions import ImproperlyConfigured
from celery.local import Proxy
from celery._state import current_app
from celery.five import reraise
from celery.utils.imports import symbol_by_name

__all__ = ['get_backend_cls', 'get_backend_by_url']

UNKNOWN_BACKEND = """\
Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})\
"""

BACKEND_ALIASES = {
'amqp': 'celery.backends.amqp:AMQPBackend',
'rpc': 'celery.backends.rpc.RPCBackend',
'cache': 'celery.backends.cache:CacheBackend',
'redis': 'celery.backends.redis:RedisBackend',
'mongodb': 'celery.backends.mongodb:MongoBackend',
'db': 'celery.backends.database:DatabaseBackend',
'database': 'celery.backends.database:DatabaseBackend',
'cassandra': 'celery.backends.cassandra:CassandraBackend',
'couchbase': 'celery.backends.couchbase:CouchBaseBackend',
'disabled': 'celery.backends.base:DisabledBackend',
}

#: deprecated alias to ``current_app.backend``.
default_backend = Proxy(lambda: current_app.backend)


def get_backend_cls(backend=None, loader=None):
"""Get backend class by name/alias"""
backend = backend or 'disabled'
loader = loader or current_app.loader
aliases = dict(BACKEND_ALIASES, **loader.override_backends)
try:
cls = symbol_by_name(backend, aliases)
except ValueError as exc:
reraise(ImproperlyConfigured, ImproperlyConfigured(
UNKNOWN_BACKEND.format(backend, exc)), sys.exc_info()[2])
if isinstance(cls, types.ModuleType):
raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
backend, 'is a Python module, not a backend class.'))
return cls


def get_backend_by_url(backend=None, loader=None):
url = None
if backend and '://' in backend:
url = backend
scheme, _, _ = url.partition('://')
if '+' in scheme:
backend, url = url.split('+', 1)
else:
backend = scheme
return get_backend_cls(backend, loader), url
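
A brief, hedged usage sketch of get_backend_by_url(); it assumes a configured app so that a loader is available, and the URLs are only examples (the 'db' case additionally needs SQLAlchemy installed).

from celery import Celery
from celery.backends import get_backend_by_url

app = Celery('sketch')

# Plain alias: no URL part, so url comes back as None.
cls, url = get_backend_by_url('disabled', app.loader)

# The scheme picks the alias and the whole URL is handed to the backend.
cls, url = get_backend_by_url('redis://localhost:6379/0', app.loader)

# A 'prefix+rest' scheme is split on '+': 'db' selects DatabaseBackend and
# the remainder ('sqlite:///results.sqlite') becomes the dburi.
cls, url = get_backend_by_url('db+sqlite:///results.sqlite', app.loader)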

+ 0  - 317    thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py

# -*- coding: utf-8 -*-
"""
celery.backends.amqp
~~~~~~~~~~~~~~~~~~~~

The AMQP result backend.

This backend publishes results as messages.

"""
from __future__ import absolute_import

import socket

from collections import deque
from operator import itemgetter

from kombu import Exchange, Queue, Producer, Consumer

from celery import states
from celery.exceptions import TimeoutError
from celery.five import range, monotonic
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_s_to_ms

from .base import BaseBackend

__all__ = ['BacklogLimitExceeded', 'AMQPBackend']

logger = get_logger(__name__)


class BacklogLimitExceeded(Exception):
"""Too much state history to fast-forward."""


def repair_uuid(s):
# Historically the dashes in UUIDs are removed from AMQ entity names,
# but there is no known reason to do so. Hopefully we'll be able to fix
# this in v4.0.
return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])


class NoCacheQueue(Queue):
can_cache_declaration = False


class AMQPBackend(BaseBackend):
"""Publishes results by sending messages."""
Exchange = Exchange
Queue = NoCacheQueue
Consumer = Consumer
Producer = Producer

BacklogLimitExceeded = BacklogLimitExceeded

persistent = True
supports_autoexpire = True
supports_native_join = True

retry_policy = {
'max_retries': 20,
'interval_start': 0,
'interval_step': 1,
'interval_max': 1,
}

def __init__(self, app, connection=None, exchange=None, exchange_type=None,
persistent=None, serializer=None, auto_delete=True, **kwargs):
super(AMQPBackend, self).__init__(app, **kwargs)
conf = self.app.conf
self._connection = connection
self.persistent = self.prepare_persistent(persistent)
self.delivery_mode = 2 if self.persistent else 1
exchange = exchange or conf.CELERY_RESULT_EXCHANGE
exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
self.exchange = self._create_exchange(
exchange, exchange_type, self.delivery_mode,
)
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
self.auto_delete = auto_delete

self.expires = None
if 'expires' not in kwargs or kwargs['expires'] is not None:
self.expires = self.prepare_expires(kwargs.get('expires'))
self.queue_arguments = dictfilter({
'x-expires': maybe_s_to_ms(self.expires),
})

def _create_exchange(self, name, type='direct', delivery_mode=2):
return self.Exchange(name=name,
type=type,
delivery_mode=delivery_mode,
durable=self.persistent,
auto_delete=False)

def _create_binding(self, task_id):
name = self.rkey(task_id)
return self.Queue(name=name,
exchange=self.exchange,
routing_key=name,
durable=self.persistent,
auto_delete=self.auto_delete,
queue_arguments=self.queue_arguments)

def revive(self, channel):
pass

def rkey(self, task_id):
return task_id.replace('-', '')

def destination_for(self, task_id, request):
if request:
return self.rkey(task_id), request.correlation_id or task_id
return self.rkey(task_id), task_id

def store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Send task return value and status."""
routing_key, correlation_id = self.destination_for(task_id, request)
if not routing_key:
return
with self.app.amqp.producer_pool.acquire(block=True) as producer:
producer.publish(
{'task_id': task_id, 'status': status,
'result': self.encode_result(result, status),
'traceback': traceback,
'children': self.current_task_children(request)},
exchange=self.exchange,
routing_key=routing_key,
correlation_id=correlation_id,
serializer=self.serializer,
retry=True, retry_policy=self.retry_policy,
declare=self.on_reply_declare(task_id),
delivery_mode=self.delivery_mode,
)
return result

def on_reply_declare(self, task_id):
return [self._create_binding(task_id)]

def wait_for(self, task_id, timeout=None, cache=True,
no_ack=True, on_interval=None,
READY_STATES=states.READY_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES,
**kwargs):
cached_meta = self._cache.get(task_id)
if cache and cached_meta and \
cached_meta['status'] in READY_STATES:
return cached_meta
else:
try:
return self.consume(task_id, timeout=timeout, no_ack=no_ack,
on_interval=on_interval)
except socket.timeout:
raise TimeoutError('The operation timed out.')

def get_task_meta(self, task_id, backlog_limit=1000):
# Polling and using basic_get
with self.app.pool.acquire_channel(block=True) as (_, channel):
binding = self._create_binding(task_id)(channel)
binding.declare()

prev = latest = acc = None
for i in range(backlog_limit): # spool ffwd
acc = binding.get(
accept=self.accept, no_ack=False,
)
if not acc: # no more messages
break
if acc.payload['task_id'] == task_id:
prev, latest = latest, acc
if prev:
# backends are not expected to keep history,
# so we delete everything except the most recent state.
prev.ack()
prev = None
else:
raise self.BacklogLimitExceeded(task_id)

if latest:
payload = self._cache[task_id] = \
self.meta_from_decoded(latest.payload)
latest.requeue()
return payload
else:
# no new state, use previous
try:
return self._cache[task_id]
except KeyError:
# result probably pending.
return {'status': states.PENDING, 'result': None}
poll = get_task_meta # XXX compat

def drain_events(self, connection, consumer,
timeout=None, on_interval=None, now=monotonic, wait=None):
wait = wait or connection.drain_events
results = {}

def callback(meta, message):
if meta['status'] in states.READY_STATES:
results[meta['task_id']] = self.meta_from_decoded(meta)

consumer.callbacks[:] = [callback]
time_start = now()

while 1:
# Total time spent may exceed a single call to wait()
if timeout and now() - time_start >= timeout:
raise socket.timeout()
try:
wait(timeout=1)
except socket.timeout:
pass
if on_interval:
on_interval()
if results: # got event on the wanted channel.
break
self._cache.update(results)
return results

def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
wait = self.drain_events
with self.app.pool.acquire_channel(block=True) as (conn, channel):
binding = self._create_binding(task_id)
with self.Consumer(channel, binding,
no_ack=no_ack, accept=self.accept) as consumer:
while 1:
try:
return wait(
conn, consumer, timeout, on_interval)[task_id]
except KeyError:
continue

def _many_bindings(self, ids):
return [self._create_binding(task_id) for task_id in ids]

def get_many(self, task_ids, timeout=None, no_ack=True,
now=monotonic, getfields=itemgetter('status', 'task_id'),
READY_STATES=states.READY_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
with self.app.pool.acquire_channel(block=True) as (conn, channel):
ids = set(task_ids)
cached_ids = set()
mark_cached = cached_ids.add
for task_id in ids:
try:
cached = self._cache[task_id]
except KeyError:
pass
else:
if cached['status'] in READY_STATES:
yield task_id, cached
mark_cached(task_id)
ids.difference_update(cached_ids)
results = deque()
push_result = results.append
push_cache = self._cache.__setitem__
decode_result = self.meta_from_decoded

def on_message(message):
body = decode_result(message.decode())
state, uid = getfields(body)
if state in READY_STATES:
push_result(body) \
if uid in task_ids else push_cache(uid, body)

bindings = self._many_bindings(task_ids)
with self.Consumer(channel, bindings, on_message=on_message,
accept=self.accept, no_ack=no_ack):
wait = conn.drain_events
popleft = results.popleft
while ids:
wait(timeout=timeout)
while results:
state = popleft()
task_id = state['task_id']
ids.discard(task_id)
push_cache(task_id, state)
yield task_id, state

def reload_task_result(self, task_id):
raise NotImplementedError(
'reload_task_result is not supported by this backend.')

def reload_group_result(self, task_id):
"""Reload group result, even if it has been previously fetched."""
raise NotImplementedError(
'reload_group_result is not supported by this backend.')

def save_group(self, group_id, result):
raise NotImplementedError(
'save_group is not supported by this backend.')

def restore_group(self, group_id, cache=True):
raise NotImplementedError(
'restore_group is not supported by this backend.')

def delete_group(self, group_id):
raise NotImplementedError(
'delete_group is not supported by this backend.')

def as_uri(self, include_password=True):
return 'amqp://'

def __reduce__(self, args=(), kwargs={}):
kwargs.update(
connection=self._connection,
exchange=self.exchange.name,
exchange_type=self.exchange.type,
persistent=self.persistent,
serializer=self.serializer,
auto_delete=self.auto_delete,
expires=self.expires,
)
return super(AMQPBackend, self).__reduce__(args, kwargs)
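
For context, a hypothetical Celery 3.x settings snippet that would select this backend; the values are illustrative and the names simply mirror the conf.CELERY_* keys read in AMQPBackend.__init__ above.

# Illustrative configuration only; not taken from this project's settings.
CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_EXCHANGE = 'celeryresults'
CELERY_RESULT_EXCHANGE_TYPE = 'direct'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_PERSISTENT = False       # delivery_mode 1 (transient messages)
CELERY_TASK_RESULT_EXPIRES = 3600      # seconds; becomes the queue's x-expires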

+ 0  - 623    thesisenv/lib/python3.6/site-packages/celery/backends/base.py

# -*- coding: utf-8 -*-
"""
celery.backends.base
~~~~~~~~~~~~~~~~~~~~

Result backend base classes.

- :class:`BaseBackend` defines the interface.

- :class:`KeyValueStoreBackend` is a common base class
using K/V semantics like _get and _put.

"""
from __future__ import absolute_import

import time
import sys

from datetime import timedelta

from billiard.einfo import ExceptionInfo
from kombu.serialization import (
dumps, loads, prepare_accept_content,
registry as serializer_registry,
)
from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
from kombu.utils.url import maybe_sanitize_url

from celery import states
from celery import current_app, maybe_signature
from celery.app import current_task
from celery.exceptions import ChordError, TimeoutError, TaskRevokedError
from celery.five import items
from celery.result import (
GroupResult, ResultBase, allow_join_result, result_from_tuple,
)
from celery.utils import timeutils
from celery.utils.functional import LRUCache
from celery.utils.log import get_logger
from celery.utils.serialization import (
get_pickled_exception,
get_pickleable_exception,
create_exception_cls,
)

__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend']

EXCEPTION_ABLE_CODECS = frozenset(['pickle'])
PY3 = sys.version_info >= (3, 0)

logger = get_logger(__name__)


def unpickle_backend(cls, args, kwargs):
"""Return an unpickled backend."""
return cls(*args, app=current_app._get_current_object(), **kwargs)


class _nulldict(dict):

def ignore(self, *a, **kw):
pass
__setitem__ = update = setdefault = ignore


class BaseBackend(object):
READY_STATES = states.READY_STATES
UNREADY_STATES = states.UNREADY_STATES
EXCEPTION_STATES = states.EXCEPTION_STATES

TimeoutError = TimeoutError

#: Time to sleep between polling each individual item
#: in `ResultSet.iterate`, as opposed to the `interval`
#: argument, which is for each pass.
subpolling_interval = None

#: If true the backend must implement :meth:`get_many`.
supports_native_join = False

#: If true the backend must automatically expire results.
#: The daily backend_cleanup periodic task will not be triggered
#: in this case.
supports_autoexpire = False

#: Set to true if the backend is persistent by default.
persistent = True

retry_policy = {
'max_retries': 20,
'interval_start': 0,
'interval_step': 1,
'interval_max': 1,
}

def __init__(self, app,
serializer=None, max_cached_results=None, accept=None,
url=None, **kwargs):
self.app = app
conf = self.app.conf
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
(self.content_type,
self.content_encoding,
self.encoder) = serializer_registry._encoders[self.serializer]
cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS
self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)
self.accept = prepare_accept_content(
conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
)
self.url = url

def as_uri(self, include_password=False):
"""Return the backend as an URI, sanitizing the password or not"""
# when using maybe_sanitize_url(), "/" is added
# we're stripping it for consistency
if include_password:
return self.url
url = maybe_sanitize_url(self.url or '')
return url[:-1] if url.endswith(':///') else url

def mark_as_started(self, task_id, **meta):
"""Mark a task as started"""
return self.store_result(task_id, meta, status=states.STARTED)

def mark_as_done(self, task_id, result, request=None):
"""Mark task as successfully executed."""
return self.store_result(task_id, result,
status=states.SUCCESS, request=request)

def mark_as_failure(self, task_id, exc, traceback=None, request=None):
"""Mark task as executed with failure. Stores the exception."""
return self.store_result(task_id, exc, status=states.FAILURE,
traceback=traceback, request=request)

def chord_error_from_stack(self, callback, exc=None):
from celery import group
app = self.app
backend = app._tasks[callback.task].backend
try:
group(
[app.signature(errback)
for errback in callback.options.get('link_error') or []],
app=app,
).apply_async((callback.id, ))
except Exception as eb_exc:
return backend.fail_from_current_stack(callback.id, exc=eb_exc)
else:
return backend.fail_from_current_stack(callback.id, exc=exc)

def fail_from_current_stack(self, task_id, exc=None):
type_, real_exc, tb = sys.exc_info()
try:
exc = real_exc if exc is None else exc
ei = ExceptionInfo((type_, exc, tb))
self.mark_as_failure(task_id, exc, ei.traceback)
return ei
finally:
del(tb)

def mark_as_retry(self, task_id, exc, traceback=None, request=None):
"""Mark task as being retries. Stores the current
exception (if any)."""
return self.store_result(task_id, exc, status=states.RETRY,
traceback=traceback, request=request)

def mark_as_revoked(self, task_id, reason='', request=None):
return self.store_result(task_id, TaskRevokedError(reason),
status=states.REVOKED, traceback=None,
request=request)

def prepare_exception(self, exc, serializer=None):
"""Prepare exception for serialization."""
serializer = self.serializer if serializer is None else serializer
if serializer in EXCEPTION_ABLE_CODECS:
return get_pickleable_exception(exc)
return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}

def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
if exc:
if not isinstance(exc, BaseException):
exc = create_exception_cls(
from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
if self.serializer in EXCEPTION_ABLE_CODECS:
exc = get_pickled_exception(exc)
return exc

def prepare_value(self, result):
"""Prepare value for storage."""
if self.serializer != 'pickle' and isinstance(result, ResultBase):
return result.as_tuple()
return result

def encode(self, data):
_, _, payload = dumps(data, serializer=self.serializer)
return payload

def meta_from_decoded(self, meta):
if meta['status'] in self.EXCEPTION_STATES:
meta['result'] = self.exception_to_python(meta['result'])
return meta

def decode_result(self, payload):
return self.meta_from_decoded(self.decode(payload))

def decode(self, payload):
payload = PY3 and payload or str(payload)
return loads(payload,
content_type=self.content_type,
content_encoding=self.content_encoding,
accept=self.accept)

def wait_for(self, task_id,
timeout=None, interval=0.5, no_ack=True, on_interval=None):
"""Wait for task and return its result.

If the task raises an exception, this exception
will be re-raised by :func:`wait_for`.

If `timeout` is not :const:`None`, this raises the
:class:`celery.exceptions.TimeoutError` exception if the operation
takes longer than `timeout` seconds.

"""

time_elapsed = 0.0

while 1:
meta = self.get_task_meta(task_id)
if meta['status'] in states.READY_STATES:
return meta
if on_interval:
on_interval()
# avoid hammering the CPU checking status.
time.sleep(interval)
time_elapsed += interval
if timeout and time_elapsed >= timeout:
raise TimeoutError('The operation timed out.')

def prepare_expires(self, value, type=None):
if value is None:
value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
if isinstance(value, timedelta):
value = timeutils.timedelta_seconds(value)
if value is not None and type:
return type(value)
return value

def prepare_persistent(self, enabled=None):
if enabled is not None:
return enabled
p = self.app.conf.CELERY_RESULT_PERSISTENT
return self.persistent if p is None else p

def encode_result(self, result, status):
if isinstance(result, ExceptionInfo):
result = result.exception
if status in self.EXCEPTION_STATES and isinstance(result, Exception):
return self.prepare_exception(result)
else:
return self.prepare_value(result)

def is_cached(self, task_id):
return task_id in self._cache

def store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Update task state and result."""
result = self.encode_result(result, status)
self._store_result(task_id, result, status, traceback,
request=request, **kwargs)
return result

def forget(self, task_id):
self._cache.pop(task_id, None)
self._forget(task_id)

def _forget(self, task_id):
raise NotImplementedError('backend does not implement forget.')

def get_status(self, task_id):
"""Get the status of a task."""
return self.get_task_meta(task_id)['status']

def get_traceback(self, task_id):
"""Get the traceback for a failed task."""
return self.get_task_meta(task_id).get('traceback')

def get_result(self, task_id):
"""Get the result of a task."""
return self.get_task_meta(task_id).get('result')

def get_children(self, task_id):
"""Get the list of subtasks sent by a task."""
try:
return self.get_task_meta(task_id)['children']
except KeyError:
pass

def get_task_meta(self, task_id, cache=True):
if cache:
try:
return self._cache[task_id]
except KeyError:
pass

meta = self._get_task_meta_for(task_id)
if cache and meta.get('status') == states.SUCCESS:
self._cache[task_id] = meta
return meta

def reload_task_result(self, task_id):
"""Reload task result, even if it has been previously fetched."""
self._cache[task_id] = self.get_task_meta(task_id, cache=False)

def reload_group_result(self, group_id):
"""Reload group result, even if it has been previously fetched."""
self._cache[group_id] = self.get_group_meta(group_id, cache=False)

def get_group_meta(self, group_id, cache=True):
if cache:
try:
return self._cache[group_id]
except KeyError:
pass

meta = self._restore_group(group_id)
if cache and meta is not None:
self._cache[group_id] = meta
return meta

def restore_group(self, group_id, cache=True):
"""Get the result for a group."""
meta = self.get_group_meta(group_id, cache=cache)
if meta:
return meta['result']

def save_group(self, group_id, result):
"""Store the result of an executed group."""
return self._save_group(group_id, result)

def delete_group(self, group_id):
self._cache.pop(group_id, None)
return self._delete_group(group_id)

def cleanup(self):
"""Backend cleanup. Is run by
:class:`celery.task.DeleteExpiredTaskMetaTask`."""
pass

def process_cleanup(self):
"""Cleanup actions to do at the end of a task worker process."""
pass

def on_task_call(self, producer, task_id):
return {}

def on_chord_part_return(self, task, state, result, propagate=False):
pass

def fallback_chord_unlock(self, group_id, body, result=None,
countdown=1, **kwargs):
kwargs['result'] = [r.as_tuple() for r in result]
self.app.tasks['celery.chord_unlock'].apply_async(
(group_id, body, ), kwargs, countdown=countdown,
)

def apply_chord(self, header, partial_args, group_id, body, **options):
result = header(*partial_args, task_id=group_id)
self.fallback_chord_unlock(group_id, body, **options)
return result

def current_task_children(self, request=None):
request = request or getattr(current_task(), 'request', None)
if request:
return [r.as_tuple() for r in getattr(request, 'children', [])]

def __reduce__(self, args=(), kwargs={}):
return (unpickle_backend, (self.__class__, args, kwargs))
BaseDictBackend = BaseBackend # XXX compat


class KeyValueStoreBackend(BaseBackend):
key_t = ensure_bytes
task_keyprefix = 'celery-task-meta-'
group_keyprefix = 'celery-taskset-meta-'
chord_keyprefix = 'chord-unlock-'
implements_incr = False

def __init__(self, *args, **kwargs):
if hasattr(self.key_t, '__func__'):
self.key_t = self.key_t.__func__ # remove binding
self._encode_prefixes()
super(KeyValueStoreBackend, self).__init__(*args, **kwargs)
if self.implements_incr:
self.apply_chord = self._apply_chord_incr

def _encode_prefixes(self):
self.task_keyprefix = self.key_t(self.task_keyprefix)
self.group_keyprefix = self.key_t(self.group_keyprefix)
self.chord_keyprefix = self.key_t(self.chord_keyprefix)

def get(self, key):
raise NotImplementedError('Must implement the get method.')

def mget(self, keys):
raise NotImplementedError('Does not support get_many')

def set(self, key, value):
raise NotImplementedError('Must implement the set method.')

def delete(self, key):
raise NotImplementedError('Must implement the delete method')

def incr(self, key):
raise NotImplementedError('Does not implement incr')

def expire(self, key, value):
pass

def get_key_for_task(self, task_id, key=''):
"""Get the cache key for a task by id."""
key_t = self.key_t
return key_t('').join([
self.task_keyprefix, key_t(task_id), key_t(key),
])

def get_key_for_group(self, group_id, key=''):
"""Get the cache key for a group by id."""
key_t = self.key_t
return key_t('').join([
self.group_keyprefix, key_t(group_id), key_t(key),
])

def get_key_for_chord(self, group_id, key=''):
"""Get the cache key for the chord waiting on group with given id."""
key_t = self.key_t
return key_t('').join([
self.chord_keyprefix, key_t(group_id), key_t(key),
])

def _strip_prefix(self, key):
"""Takes bytes, emits string."""
key = self.key_t(key)
for prefix in self.task_keyprefix, self.group_keyprefix:
if key.startswith(prefix):
return bytes_to_str(key[len(prefix):])
return bytes_to_str(key)

def _filter_ready(self, values, READY_STATES=states.READY_STATES):
for k, v in values:
if v is not None:
v = self.decode_result(v)
if v['status'] in READY_STATES:
yield k, v

def _mget_to_results(self, values, keys):
if hasattr(values, 'items'):
# client returns dict so mapping preserved.
return dict((self._strip_prefix(k), v)
for k, v in self._filter_ready(items(values)))
else:
# client returns list so need to recreate mapping.
return dict((bytes_to_str(keys[i]), v)
for i, v in self._filter_ready(enumerate(values)))

def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
READY_STATES=states.READY_STATES):
interval = 0.5 if interval is None else interval
ids = task_ids if isinstance(task_ids, set) else set(task_ids)
cached_ids = set()
cache = self._cache
for task_id in ids:
try:
cached = cache[task_id]
except KeyError:
pass
else:
if cached['status'] in READY_STATES:
yield bytes_to_str(task_id), cached
cached_ids.add(task_id)

ids.difference_update(cached_ids)
iterations = 0
while ids:
keys = list(ids)
r = self._mget_to_results(self.mget([self.get_key_for_task(k)
for k in keys]), keys)
cache.update(r)
ids.difference_update(set(bytes_to_str(v) for v in r))
for key, value in items(r):
yield bytes_to_str(key), value
if timeout and iterations * interval >= timeout:
raise TimeoutError('Operation timed out ({0})'.format(timeout))
time.sleep(interval) # don't busy loop.
iterations += 1

def _forget(self, task_id):
self.delete(self.get_key_for_task(task_id))

def _store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
meta = {'status': status, 'result': result, 'traceback': traceback,
'children': self.current_task_children(request)}
self.set(self.get_key_for_task(task_id), self.encode(meta))
return result

def _save_group(self, group_id, result):
self.set(self.get_key_for_group(group_id),
self.encode({'result': result.as_tuple()}))
return result

def _delete_group(self, group_id):
self.delete(self.get_key_for_group(group_id))

def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
meta = self.get(self.get_key_for_task(task_id))
if not meta:
return {'status': states.PENDING, 'result': None}
return self.decode_result(meta)

def _restore_group(self, group_id):
"""Get task metadata for a task by id."""
meta = self.get(self.get_key_for_group(group_id))
# previously this was always pickled, but later this
# was extended to support other serializers, so the
# structure is kind of weird.
if meta:
meta = self.decode(meta)
result = meta['result']
meta['result'] = result_from_tuple(result, self.app)
return meta

def _apply_chord_incr(self, header, partial_args, group_id, body,
result=None, **options):
self.save_group(group_id, self.app.GroupResult(group_id, result))
return header(*partial_args, task_id=group_id)

def on_chord_part_return(self, task, state, result, propagate=None):
if not self.implements_incr:
return
app = self.app
if propagate is None:
propagate = app.conf.CELERY_CHORD_PROPAGATES
gid = task.request.group
if not gid:
return
key = self.get_key_for_chord(gid)
try:
deps = GroupResult.restore(gid, backend=task.backend)
except Exception as exc:
callback = maybe_signature(task.request.chord, app=app)
logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
return self.chord_error_from_stack(
callback,
ChordError('Cannot restore group: {0!r}'.format(exc)),
)
if deps is None:
try:
raise ValueError(gid)
except ValueError as exc:
callback = maybe_signature(task.request.chord, app=app)
logger.error('Chord callback %r raised: %r', gid, exc,
exc_info=1)
return self.chord_error_from_stack(
callback,
ChordError('GroupResult {0} no longer exists'.format(gid)),
)
val = self.incr(key)
size = len(deps)
if val > size:
logger.warning('Chord counter incremented too many times for %r',
gid)
elif val == size:
callback = maybe_signature(task.request.chord, app=app)
j = deps.join_native if deps.supports_native_join else deps.join
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=propagate)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)

logger.error('Chord %r raised: %r', gid, reason, exc_info=1)
self.chord_error_from_stack(callback, ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc:
logger.error('Chord %r raised: %r', gid, exc, exc_info=1)
self.chord_error_from_stack(
callback,
ChordError('Callback error: {0!r}'.format(exc)),
)
finally:
deps.delete()
self.client.delete(key)
else:
self.expire(key, 86400)


class DisabledBackend(BaseBackend):
_cache = {} # need this attribute to reset cache in tests.

def store_result(self, *args, **kwargs):
pass

def _is_disabled(self, *args, **kwargs):
raise NotImplementedError(
'No result backend configured. '
'Please see the documentation for more information.')

def as_uri(self, *args, **kwargs):
return 'disabled://'

get_state = get_status = get_result = get_traceback = _is_disabled
wait_for = get_many = _is_disabled
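
To make the KeyValueStoreBackend contract above concrete, here is a minimal, purely illustrative in-process subclass (not a backend shipped with Celery); it only provides the required get/mget/set/delete methods and inherits everything else.

from celery.backends.base import KeyValueStoreBackend


class DictBackend(KeyValueStoreBackend):
    """Toy backend storing encoded task metadata in a plain dict."""

    def __init__(self, *args, **kwargs):
        super(DictBackend, self).__init__(*args, **kwargs)
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def mget(self, keys):
        # Returning a list keeps _mget_to_results() on the list code path.
        return [self._data.get(key) for key in keys]

    def set(self, key, value):
        self._data[key] = value

    def delete(self, key):
        self._data.pop(key, None)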

+ 0  - 161    thesisenv/lib/python3.6/site-packages/celery/backends/cache.py

# -*- coding: utf-8 -*-
"""
celery.backends.cache
~~~~~~~~~~~~~~~~~~~~~

Memcache and in-memory cache result backend.

"""
from __future__ import absolute_import

import sys

from kombu.utils import cached_property
from kombu.utils.encoding import bytes_to_str, ensure_bytes

from celery.exceptions import ImproperlyConfigured
from celery.utils.functional import LRUCache

from .base import KeyValueStoreBackend

__all__ = ['CacheBackend']

_imp = [None]

PY3 = sys.version_info[0] == 3

REQUIRES_BACKEND = """\
The memcached backend requires either pylibmc or python-memcached.\
"""

UNKNOWN_BACKEND = """\
The cache backend {0!r} is unknown.
Please use one of the following backends instead: {1}\
"""


def import_best_memcache():
if _imp[0] is None:
is_pylibmc, memcache_key_t = False, ensure_bytes
try:
import pylibmc as memcache
is_pylibmc = True
except ImportError:
try:
import memcache # noqa
except ImportError:
raise ImproperlyConfigured(REQUIRES_BACKEND)
if PY3:
memcache_key_t = bytes_to_str
_imp[0] = (is_pylibmc, memcache, memcache_key_t)
return _imp[0]


def get_best_memcache(*args, **kwargs):
is_pylibmc, memcache, key_t = import_best_memcache()
Client = _Client = memcache.Client

if not is_pylibmc:
def Client(*args, **kwargs): # noqa
kwargs.pop('behaviors', None)
return _Client(*args, **kwargs)

return Client, key_t


class DummyClient(object):

def __init__(self, *args, **kwargs):
self.cache = LRUCache(limit=5000)

def get(self, key, *args, **kwargs):
return self.cache.get(key)

def get_multi(self, keys):
cache = self.cache
return dict((k, cache[k]) for k in keys if k in cache)

def set(self, key, value, *args, **kwargs):
self.cache[key] = value

def delete(self, key, *args, **kwargs):
self.cache.pop(key, None)

def incr(self, key, delta=1):
return self.cache.incr(key, delta)


backends = {'memcache': get_best_memcache,
'memcached': get_best_memcache,
'pylibmc': get_best_memcache,
'memory': lambda: (DummyClient, ensure_bytes)}


class CacheBackend(KeyValueStoreBackend):
servers = None
supports_autoexpire = True
supports_native_join = True
implements_incr = True

def __init__(self, app, expires=None, backend=None,
options={}, url=None, **kwargs):
super(CacheBackend, self).__init__(app, **kwargs)
self.url = url

self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
**options)

self.backend = url or backend or self.app.conf.CELERY_CACHE_BACKEND
if self.backend:
self.backend, _, servers = self.backend.partition('://')
self.servers = servers.rstrip('/').split(';')
self.expires = self.prepare_expires(expires, type=int)
try:
self.Client, self.key_t = backends[self.backend]()
except KeyError:
raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
self.backend, ', '.join(backends)))
self._encode_prefixes()  # re-encode the key prefixes

def get(self, key):
return self.client.get(key)

def mget(self, keys):
return self.client.get_multi(keys)

def set(self, key, value):
return self.client.set(key, value, self.expires)

def delete(self, key):
return self.client.delete(key)

def _apply_chord_incr(self, header, partial_args, group_id, body, **opts):
self.client.set(self.get_key_for_chord(group_id), 0, time=86400)
return super(CacheBackend, self)._apply_chord_incr(
header, partial_args, group_id, body, **opts
)

def incr(self, key):
return self.client.incr(key)

@cached_property
def client(self):
return self.Client(self.servers, **self.options)

def __reduce__(self, args=(), kwargs={}):
servers = ';'.join(self.servers)
backend = '{0}://{1}/'.format(self.backend, servers)
kwargs.update(
dict(backend=backend,
expires=self.expires,
options=self.options))
return super(CacheBackend, self).__reduce__(args, kwargs)

def as_uri(self, *args, **kwargs):
"""Return the backend as an URI.

This properly handles the case of multiple servers.

"""
servers = ';'.join(self.servers)
return '{0}://{1}/'.format(self.backend, servers)
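
Purely as illustration, a hypothetical configuration for this backend; the 'cache+memcached://...' URL is split by get_backend_by_url() and then partitioned in CacheBackend.__init__, while 'memory://' selects the in-process DummyClient (handy for tests).

# Illustrative values only.
CELERY_RESULT_BACKEND = 'cache+memcached://127.0.0.1:11211/'
CELERY_CACHE_BACKEND_OPTIONS = {'binary': True,
                                'behaviors': {'tcp_nodelay': True}}

# In-memory variant, e.g. for test settings:
# CELERY_RESULT_BACKEND = 'cache'
# CELERY_CACHE_BACKEND = 'memory://'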

+ 0  - 196    thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py

# -*- coding: utf-8 -*-
"""
celery.backends.cassandra
~~~~~~~~~~~~~~~~~~~~~~~~~

Apache Cassandra result store backend.

"""
from __future__ import absolute_import

try: # pragma: no cover
import pycassa
from thrift import Thrift
C = pycassa.cassandra.ttypes
except ImportError: # pragma: no cover
pycassa = None # noqa

import socket
import time

from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_timedelta, timedelta_seconds

from .base import BaseBackend

__all__ = ['CassandraBackend']

logger = get_logger(__name__)


class CassandraBackend(BaseBackend):
"""Highly fault tolerant Cassandra backend.

.. attribute:: servers

List of Cassandra servers with format: ``hostname:port``.

:raises celery.exceptions.ImproperlyConfigured: if
module :mod:`pycassa` is not available.

"""
servers = []
keyspace = None
column_family = None
detailed_mode = False
_retry_timeout = 300
_retry_wait = 3
supports_autoexpire = True

def __init__(self, servers=None, keyspace=None, column_family=None,
cassandra_options=None, detailed_mode=False, **kwargs):
"""Initialize Cassandra backend.

Raises :class:`celery.exceptions.ImproperlyConfigured` if
the :setting:`CASSANDRA_SERVERS` setting is not set.

"""
super(CassandraBackend, self).__init__(**kwargs)

self.expires = kwargs.get('expires') or maybe_timedelta(
self.app.conf.CELERY_TASK_RESULT_EXPIRES)

if not pycassa:
raise ImproperlyConfigured(
'You need to install the pycassa library to use the '
'Cassandra backend. See https://github.com/pycassa/pycassa')

conf = self.app.conf
self.servers = (servers or
conf.get('CASSANDRA_SERVERS') or
self.servers)
self.keyspace = (keyspace or
conf.get('CASSANDRA_KEYSPACE') or
self.keyspace)
self.column_family = (column_family or
conf.get('CASSANDRA_COLUMN_FAMILY') or
self.column_family)
self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {},
**cassandra_options or {})
self.detailed_mode = (detailed_mode or
conf.get('CASSANDRA_DETAILED_MODE') or
self.detailed_mode)
read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM'
write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM'
try:
self.read_consistency = getattr(pycassa.ConsistencyLevel,
read_cons)
except AttributeError:
self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM
try:
self.write_consistency = getattr(pycassa.ConsistencyLevel,
write_cons)
except AttributeError:
self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM

if not self.servers or not self.keyspace or not self.column_family:
raise ImproperlyConfigured(
'Cassandra backend not configured.')

self._column_family = None

def _retry_on_error(self, fun, *args, **kwargs):
ts = monotonic() + self._retry_timeout
while 1:
try:
return fun(*args, **kwargs)
except (pycassa.InvalidRequestException,
pycassa.TimedOutException,
pycassa.UnavailableException,
pycassa.AllServersUnavailable,
socket.error,
socket.timeout,
Thrift.TException) as exc:
if monotonic() > ts:
raise
logger.warning('Cassandra error: %r. Retrying...', exc)
time.sleep(self._retry_wait)

def _get_column_family(self):
if self._column_family is None:
conn = pycassa.ConnectionPool(self.keyspace,
server_list=self.servers,
**self.cassandra_options)
self._column_family = pycassa.ColumnFamily(
conn, self.column_family,
read_consistency_level=self.read_consistency,
write_consistency_level=self.write_consistency,
)
return self._column_family

def process_cleanup(self):
if self._column_family is not None:
self._column_family = None

def _store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Store return value and status of an executed task."""

def _do_store():
cf = self._get_column_family()
date_done = self.app.now()
meta = {'status': status,
'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
'traceback': self.encode(traceback),
'result': self.encode(result),
'children': self.encode(
self.current_task_children(request),
)}
if self.detailed_mode:
cf.insert(task_id, {date_done: self.encode(meta)},
ttl=self.expires and timedelta_seconds(self.expires))
else:
cf.insert(task_id, meta,
ttl=self.expires and timedelta_seconds(self.expires))

return self._retry_on_error(_do_store)

def as_uri(self, include_password=True):
return 'cassandra://'

def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""

def _do_get():
cf = self._get_column_family()
try:
if self.detailed_mode:
row = cf.get(task_id, column_reversed=True, column_count=1)
obj = self.decode(list(row.values())[0])
else:
obj = cf.get(task_id)

meta = {
'task_id': task_id,
'status': obj['status'],
'result': self.decode(obj['result']),
'date_done': obj['date_done'],
'traceback': self.decode(obj['traceback']),
'children': self.decode(obj['children']),
}
except (KeyError, pycassa.NotFoundException):
meta = {'status': states.PENDING, 'result': None}
return meta

return self._retry_on_error(_do_get)

def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(servers=self.servers,
keyspace=self.keyspace,
column_family=self.column_family,
cassandra_options=self.cassandra_options))
return super(CassandraBackend, self).__reduce__(args, kwargs)
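
A hypothetical settings block for the pycassa-based backend above, with illustrative values only; the names correspond to the conf.get('CASSANDRA_*') lookups in __init__.

# Illustrative values only.
CELERY_RESULT_BACKEND = 'cassandra'
CASSANDRA_SERVERS = ['localhost:9160']
CASSANDRA_KEYSPACE = 'celery'
CASSANDRA_COLUMN_FAMILY = 'task_results'
CASSANDRA_READ_CONSISTENCY = 'LOCAL_QUORUM'
CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_QUORUM'
CASSANDRA_DETAILED_MODE = False
CASSANDRA_OPTIONS = {'timeout': 300}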

+ 0  - 116    thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py

# -*- coding: utf-8 -*-
"""
celery.backends.couchbase
~~~~~~~~~~~~~~~~~~~~~~~~~

CouchBase result store backend.

"""
from __future__ import absolute_import

import logging

try:
from couchbase import Couchbase
from couchbase.connection import Connection
from couchbase.exceptions import NotFoundError
except ImportError:
Couchbase = Connection = NotFoundError = None # noqa

from kombu.utils.url import _parse_url

from celery.exceptions import ImproperlyConfigured
from celery.utils.timeutils import maybe_timedelta

from .base import KeyValueStoreBackend

__all__ = ['CouchBaseBackend']


class CouchBaseBackend(KeyValueStoreBackend):
"""CouchBase backend.

:raises celery.exceptions.ImproperlyConfigured: if
module :mod:`couchbase` is not available.

"""
bucket = 'default'
host = 'localhost'
port = 8091
username = None
password = None
quiet = False
conncache = None
unlock_gil = True
timeout = 2.5
transcoder = None

def __init__(self, url=None, *args, **kwargs):
super(CouchBaseBackend, self).__init__(*args, **kwargs)
self.url = url

self.expires = kwargs.get('expires') or maybe_timedelta(
self.app.conf.CELERY_TASK_RESULT_EXPIRES)

if Couchbase is None:
raise ImproperlyConfigured(
'You need to install the couchbase library to use the '
'CouchBase backend.',
)

uhost = uport = uname = upass = ubucket = None
if url:
_, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
ubucket = ubucket.strip('/') if ubucket else None

config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None)
if config is not None:
if not isinstance(config, dict):
raise ImproperlyConfigured(
'Couchbase backend settings should be grouped in a dict',
)
else:
config = {}

self.host = uhost or config.get('host', self.host)
self.port = int(uport or config.get('port', self.port))
self.bucket = ubucket or config.get('bucket', self.bucket)
self.username = uname or config.get('username', self.username)
self.password = upass or config.get('password', self.password)

self._connection = None

def _get_connection(self):
"""Connect to the Couchbase server."""
if self._connection is None:
kwargs = {'bucket': self.bucket, 'host': self.host}

if self.port:
kwargs.update({'port': self.port})
if self.username:
kwargs.update({'username': self.username})
if self.password:
kwargs.update({'password': self.password})

logging.debug('couchbase settings %r', kwargs)
self._connection = Connection(**kwargs)
return self._connection

@property
def connection(self):
return self._get_connection()

def get(self, key):
try:
return self.connection.get(key).value
except NotFoundError:
return None

def set(self, key, value):
self.connection.set(key, value)

def mget(self, keys):
return [self.get(key) for key in keys]

def delete(self, key):
self.connection.delete(key)
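
A hypothetical configuration for the CouchBase backend above (host and credentials are made up); either the URL form, which _parse_url() splits in __init__, or the settings dict can be used.

# Illustrative values only.
CELERY_RESULT_BACKEND = 'couchbase://user:password@localhost:8091/default'

# Equivalent dict form:
# CELERY_RESULT_BACKEND = 'couchbase'
# CELERY_COUCHBASE_BACKEND_SETTINGS = {
#     'host': 'localhost', 'port': 8091,
#     'username': 'user', 'password': 'password', 'bucket': 'default',
# }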

+ 0  - 201    thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py

# -*- coding: utf-8 -*-
"""
celery.backends.database
~~~~~~~~~~~~~~~~~~~~~~~~

SQLAlchemy result store backend.

"""
from __future__ import absolute_import

import logging
from contextlib import contextmanager
from functools import wraps

from celery import states
from celery.backends.base import BaseBackend
from celery.exceptions import ImproperlyConfigured
from celery.five import range
from celery.utils.timeutils import maybe_timedelta

from .models import Task
from .models import TaskSet
from .session import SessionManager

logger = logging.getLogger(__name__)

__all__ = ['DatabaseBackend']


def _sqlalchemy_installed():
try:
import sqlalchemy
except ImportError:
raise ImproperlyConfigured(
'The database result backend requires SQLAlchemy to be installed. '
'See http://pypi.python.org/pypi/SQLAlchemy')
return sqlalchemy
_sqlalchemy_installed()

from sqlalchemy.exc import DatabaseError, InvalidRequestError # noqa
from sqlalchemy.orm.exc import StaleDataError # noqa


@contextmanager
def session_cleanup(session):
try:
yield
except Exception:
session.rollback()
raise
finally:
session.close()


def retry(fun):

@wraps(fun)
def _inner(*args, **kwargs):
max_retries = kwargs.pop('max_retries', 3)

for retries in range(max_retries):
try:
return fun(*args, **kwargs)
except (DatabaseError, InvalidRequestError, StaleDataError):
logger.warning(
"Failed operation %s. Retrying %s more times.",
fun.__name__, max_retries - retries - 1,
exc_info=True,
)
if retries + 1 >= max_retries:
raise

return _inner


class DatabaseBackend(BaseBackend):
"""The database result backend."""
# ResultSet.iterate should sleep this much between each poll,
# to not bombard the database with queries.
subpolling_interval = 0.5

def __init__(self, dburi=None, expires=None,
engine_options=None, url=None, **kwargs):
# The `url` argument was added later and is used by
# the app to set backend by url (celery.backends.get_backend_by_url)
super(DatabaseBackend, self).__init__(**kwargs)
conf = self.app.conf
self.expires = maybe_timedelta(self.prepare_expires(expires))
self.url = url or dburi or conf.CELERY_RESULT_DBURI
self.engine_options = dict(
engine_options or {},
**conf.CELERY_RESULT_ENGINE_OPTIONS or {})
self.short_lived_sessions = kwargs.get(
'short_lived_sessions',
conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS,
)

tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {}
Task.__table__.name = tablenames.get('task', 'celery_taskmeta')
TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta')

if not self.url:
raise ImproperlyConfigured(
'Missing connection string! Do you have '
'CELERY_RESULT_DBURI set to a real value?')

def ResultSession(self, session_manager=SessionManager()):
return session_manager.session_factory(
dburi=self.url,
short_lived_sessions=self.short_lived_sessions,
**self.engine_options
)

@retry
def _store_result(self, task_id, result, status,
traceback=None, max_retries=3, **kwargs):
"""Store return value and status of an executed task."""
session = self.ResultSession()
with session_cleanup(session):
task = list(session.query(Task).filter(Task.task_id == task_id))
task = task and task[0]
if not task:
task = Task(task_id)
session.add(task)
session.flush()
task.result = result
task.status = status
task.traceback = traceback
session.commit()
return result

@retry
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
session = self.ResultSession()
with session_cleanup(session):
task = list(session.query(Task).filter(Task.task_id == task_id))
task = task and task[0]
if not task:
task = Task(task_id)
task.status = states.PENDING
task.result = None
return self.meta_from_decoded(task.to_dict())

@retry
def _save_group(self, group_id, result):
"""Store the result of an executed group."""
session = self.ResultSession()
with session_cleanup(session):
group = TaskSet(group_id, result)
session.add(group)
session.flush()
session.commit()
return result

@retry
def _restore_group(self, group_id):
"""Get metadata for group by id."""
session = self.ResultSession()
with session_cleanup(session):
group = session.query(TaskSet).filter(
TaskSet.taskset_id == group_id).first()
if group:
return group.to_dict()

@retry
def _delete_group(self, group_id):
"""Delete metadata for group by id."""
session = self.ResultSession()
with session_cleanup(session):
session.query(TaskSet).filter(
TaskSet.taskset_id == group_id).delete()
session.flush()
session.commit()

@retry
def _forget(self, task_id):
"""Forget about result."""
session = self.ResultSession()
with session_cleanup(session):
session.query(Task).filter(Task.task_id == task_id).delete()
session.commit()

def cleanup(self):
"""Delete expired metadata."""
session = self.ResultSession()
expires = self.expires
now = self.app.now()
with session_cleanup(session):
session.query(Task).filter(
Task.date_done < (now - expires)).delete()
session.query(TaskSet).filter(
TaskSet.date_done < (now - expires)).delete()
session.commit()

def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(dburi=self.url,
expires=self.expires,
engine_options=self.engine_options))
return super(DatabaseBackend, self).__reduce__(args, kwargs)
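
A hypothetical settings block for the SQLAlchemy backend above, values illustrative; the 'db+' prefix is stripped by get_backend_by_url() and the remainder becomes the dburi handed to the session machinery.

# Illustrative values only.
CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
CELERY_RESULT_ENGINE_OPTIONS = {'echo': False}
CELERY_RESULT_DB_SHORT_LIVED_SESSIONS = True   # recreate sessions between uses
CELERY_RESULT_DB_TABLENAMES = {'task': 'celery_taskmeta',
                               'group': 'celery_tasksetmeta'}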

+ 0  - 74    thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py

# -*- coding: utf-8 -*-
"""
celery.backends.database.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Database tables for the SQLAlchemy result store backend.

"""
from __future__ import absolute_import

from datetime import datetime

import sqlalchemy as sa
from sqlalchemy.types import PickleType

from celery import states

from .session import ResultModelBase

__all__ = ['Task', 'TaskSet']


class Task(ResultModelBase):
"""Task result/status."""
__tablename__ = 'celery_taskmeta'
__table_args__ = {'sqlite_autoincrement': True}

id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
primary_key=True,
autoincrement=True)
task_id = sa.Column(sa.String(255), unique=True)
status = sa.Column(sa.String(50), default=states.PENDING)
result = sa.Column(PickleType, nullable=True)
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)

def __init__(self, task_id):
self.task_id = task_id

def to_dict(self):
return {'task_id': self.task_id,
'status': self.status,
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done}

def __repr__(self):
return '<Task {0.task_id} state: {0.status}>'.format(self)


class TaskSet(ResultModelBase):
"""TaskSet result"""
__tablename__ = 'celery_tasksetmeta'
__table_args__ = {'sqlite_autoincrement': True}

id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
autoincrement=True, primary_key=True)
taskset_id = sa.Column(sa.String(255), unique=True)
result = sa.Column(PickleType, nullable=True)
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
nullable=True)

def __init__(self, taskset_id, result):
self.taskset_id = taskset_id
self.result = result

def to_dict(self):
return {'taskset_id': self.taskset_id,
'result': self.result,
'date_done': self.date_done}

def __repr__(self):
return '<TaskSet: {0.taskset_id}>'.format(self)

+ 0  - 62    thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py

# -*- coding: utf-8 -*-
"""
celery.backends.database.session
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

SQLAlchemy sessions.

"""
from __future__ import absolute_import

from billiard.util import register_after_fork

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool

ResultModelBase = declarative_base()

__all__ = ['SessionManager']


class SessionManager(object):
def __init__(self):
self._engines = {}
self._sessions = {}
self.forked = False
self.prepared = False
register_after_fork(self, self._after_fork)

def _after_fork(self,):
self.forked = True

def get_engine(self, dburi, **kwargs):
if self.forked:
try:
return self._engines[dburi]
except KeyError:
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
kwargs['poolclass'] = NullPool
return create_engine(dburi, **kwargs)

def create_session(self, dburi, short_lived_sessions=False, **kwargs):
engine = self.get_engine(dburi, **kwargs)
if self.forked:
if short_lived_sessions or dburi not in self._sessions:
self._sessions[dburi] = sessionmaker(bind=engine)
return engine, self._sessions[dburi]
else:
return engine, sessionmaker(bind=engine)

def prepare_models(self, engine):
if not self.prepared:
ResultModelBase.metadata.create_all(engine)
self.prepared = True

def session_factory(self, dburi, **kwargs):
engine, session = self.create_session(dburi, **kwargs)
self.prepare_models(engine)
return session()
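
A small usage sketch, assuming an in-memory SQLite URL; session_factory() is what DatabaseBackend.ResultSession() calls, creating the engine, ensuring the tables exist and returning a ready session.

from celery.backends.database.session import SessionManager

manager = SessionManager()
session = manager.session_factory('sqlite://', short_lived_sessions=True)
try:
    pass  # Task / TaskSet queries would go here.
finally:
    session.close()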

+ 0  - 264    thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py

# -*- coding: utf-8 -*-
"""
celery.backends.mongodb
~~~~~~~~~~~~~~~~~~~~~~~

MongoDB result store backend.

"""
from __future__ import absolute_import

from datetime import datetime

from kombu.syn import detect_environment
from kombu.utils import cached_property
from kombu.utils.url import maybe_sanitize_url

from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.five import items, string_t
from celery.utils.timeutils import maybe_timedelta

from .base import BaseBackend

try:
import pymongo
except ImportError: # pragma: no cover
pymongo = None # noqa

if pymongo:
try:
from bson.binary import Binary
except ImportError: # pragma: no cover
from pymongo.binary import Binary # noqa
else: # pragma: no cover
Binary = None # noqa

__all__ = ['MongoBackend']


class MongoBackend(BaseBackend):
"""MongoDB result backend.

:raises celery.exceptions.ImproperlyConfigured: if
module :mod:`pymongo` is not available.

"""

host = 'localhost'
port = 27017
user = None
password = None
database_name = 'celery'
taskmeta_collection = 'celery_taskmeta'
max_pool_size = 10
options = None

supports_autoexpire = False

_connection = None

def __init__(self, app=None, url=None, **kwargs):
self.options = {}
super(MongoBackend, self).__init__(app, **kwargs)
self.expires = kwargs.get('expires') or maybe_timedelta(
self.app.conf.CELERY_TASK_RESULT_EXPIRES)

if not pymongo:
raise ImproperlyConfigured(
'You need to install the pymongo library to use the '
'MongoDB backend.')

config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS')
if config is not None:
if not isinstance(config, dict):
raise ImproperlyConfigured(
'MongoDB backend settings should be grouped in a dict')
config = dict(config) # do not modify original

self.host = config.pop('host', self.host)
self.port = int(config.pop('port', self.port))
self.user = config.pop('user', self.user)
self.password = config.pop('password', self.password)
self.database_name = config.pop('database', self.database_name)
self.taskmeta_collection = config.pop(
'taskmeta_collection', self.taskmeta_collection,
)

self.options = dict(config, **config.pop('options', None) or {})

# Set option defaults
for key, value in items(self._prepare_client_options()):
self.options.setdefault(key, value)

self.url = url
if self.url:
# Specifying backend as an URL
self.host = self.url

def _prepare_client_options(self):
if pymongo.version_tuple >= (3, ):
return {'maxPoolSize': self.max_pool_size}
else: # pragma: no cover
options = {
'max_pool_size': self.max_pool_size,
'auto_start_request': False
}
if detect_environment() != 'default':
options['use_greenlets'] = True
return options

def _get_connection(self):
"""Connect to the MongoDB server."""
if self._connection is None:
from pymongo import MongoClient

# The first pymongo.Connection() argument (host) can be
# a list of ['host:port'] elements or a mongodb connection
# URI. If this is the case, don't use self.port
# but let pymongo get the port(s) from the URI instead.
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
url = self.host
if isinstance(url, string_t) \
and not url.startswith('mongodb://'):
url = 'mongodb://{0}:{1}'.format(url, self.port)
if url == 'mongodb://':
url = url + 'localhost'
self._connection = MongoClient(host=url, **self.options)

return self._connection

def process_cleanup(self):
if self._connection is not None:
# MongoDB connection will be closed automatically when object
# goes out of scope
del(self.collection)
del(self.database)
self._connection = None

def _store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Store return value and status of an executed task."""
meta = {'_id': task_id,
'status': status,
'result': Binary(self.encode(result)),
'date_done': datetime.utcnow(),
'traceback': Binary(self.encode(traceback)),
'children': Binary(self.encode(
self.current_task_children(request),
))}
self.collection.save(meta)

return result

def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""

obj = self.collection.find_one({'_id': task_id})
if not obj:
return {'status': states.PENDING, 'result': None}

meta = {
'task_id': obj['_id'],
'status': obj['status'],
'result': self.decode(obj['result']),
'date_done': obj['date_done'],
'traceback': self.decode(obj['traceback']),
'children': self.decode(obj['children']),
}

return meta

def _save_group(self, group_id, result):
"""Save the group result."""
meta = {'_id': group_id,
'result': Binary(self.encode(result)),
'date_done': datetime.utcnow()}
self.collection.save(meta)

return result

def _restore_group(self, group_id):
"""Get the result for a group by id."""
obj = self.collection.find_one({'_id': group_id})
if not obj:
return

meta = {
'task_id': obj['_id'],
'result': self.decode(obj['result']),
'date_done': obj['date_done'],
}

return meta

def _delete_group(self, group_id):
"""Delete a group by id."""
self.collection.remove({'_id': group_id})

def _forget(self, task_id):
"""Remove result from MongoDB.

:raises celery.exceptions.OperationsError:
if the task_id could not be removed.

"""
# By using safe=True, this will wait until it receives a response from
# the server, and will raise an OperationsError if the operation
# could not be completed.
self.collection.remove({'_id': task_id})

def cleanup(self):
"""Delete expired metadata."""
self.collection.remove(
{'date_done': {'$lt': self.app.now() - self.expires}},
)

def __reduce__(self, args=(), kwargs={}):
return super(MongoBackend, self).__reduce__(
args, dict(kwargs, expires=self.expires, url=self.url),
)

def _get_database(self):
conn = self._get_connection()
db = conn[self.database_name]
if self.user and self.password:
if not db.authenticate(self.user,
self.password):
raise ImproperlyConfigured(
'Invalid MongoDB username or password.')
return db

@cached_property
def database(self):
"""Get database from MongoDB connection and perform authentication
if necessary."""
return self._get_database()

@cached_property
def collection(self):
"""Get the metadata task collection."""
collection = self.database[self.taskmeta_collection]

# Ensure an index on date_done exists; if not, build the index
# in the background. Once complete, cleanup will be much faster.
collection.ensure_index('date_done', background='true')
return collection

def as_uri(self, include_password=False):
"""Return the backend as an URI.

:keyword include_password: Include the password if :const:`True`, otherwise censor it.

"""
if not self.url:
return 'mongodb://'
if include_password:
return self.url

if ',' not in self.url:
return maybe_sanitize_url(self.url)

uri1, remainder = self.url.split(',', 1)
return ','.join([maybe_sanitize_url(uri1), remainder])
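
A hypothetical configuration for this backend with illustrative values; the settings dict keys match the config.pop(...) calls in __init__ above.

# Illustrative values only.
CELERY_RESULT_BACKEND = 'mongodb://localhost:27017/'
CELERY_MONGODB_BACKEND_SETTINGS = {
    'database': 'celery',
    'taskmeta_collection': 'celery_taskmeta',
    'options': {'maxPoolSize': 10},
}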

+ 0  - 295    thesisenv/lib/python3.6/site-packages/celery/backends/redis.py

# -*- coding: utf-8 -*-
"""
celery.backends.redis
~~~~~~~~~~~~~~~~~~~~~

Redis result store backend.

"""
from __future__ import absolute_import

from functools import partial

from kombu.utils import cached_property, retry_over_time
from kombu.utils.url import _parse_url

from celery import states
from celery.canvas import maybe_signature
from celery.exceptions import ChordError, ImproperlyConfigured
from celery.five import string_t
from celery.utils import deprecated_property, strtobool
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
from celery.utils.timeutils import humanize_seconds

from .base import KeyValueStoreBackend

try:
import redis
from redis.exceptions import ConnectionError
from kombu.transport.redis import get_redis_error_classes
except ImportError: # pragma: no cover
redis = None # noqa
ConnectionError = None # noqa
get_redis_error_classes = None # noqa

__all__ = ['RedisBackend']

REDIS_MISSING = """\
You need to install the redis library in order to use \
the Redis result store backend."""

logger = get_logger(__name__)
error = logger.error


class RedisBackend(KeyValueStoreBackend):
"""Redis task result store."""

#: redis-py client module.
redis = redis

#: Maximum number of connections in the pool.
max_connections = None

supports_autoexpire = True
supports_native_join = True
implements_incr = True

def __init__(self, host=None, port=None, db=None, password=None,
expires=None, max_connections=None, url=None,
connection_pool=None, new_join=False, **kwargs):
super(RedisBackend, self).__init__(**kwargs)
conf = self.app.conf
if self.redis is None:
raise ImproperlyConfigured(REDIS_MISSING)
self._client_capabilities = self._detect_client_capabilities()

# For compatibility with the old REDIS_* configuration keys.
def _get(key):
for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
try:
return conf[prefix.format(key)]
except KeyError:
pass
if host and '://' in host:
url = host
host = None

self.max_connections = (
max_connections or _get('MAX_CONNECTIONS') or self.max_connections
)
self._ConnectionPool = connection_pool

self.connparams = {
'host': _get('HOST') or 'localhost',
'port': _get('PORT') or 6379,
'db': _get('DB') or 0,
'password': _get('PASSWORD'),
'max_connections': self.max_connections,
}
if url:
self.connparams = self._params_from_url(url, self.connparams)
self.url = url
self.expires = self.prepare_expires(expires, type=int)

try:
new_join = strtobool(self.connparams.pop('new_join'))
except KeyError:
pass
if new_join:
self.apply_chord = self._new_chord_apply
self.on_chord_part_return = self._new_chord_return

self.connection_errors, self.channel_errors = (
get_redis_error_classes() if get_redis_error_classes
else ((), ()))

def _params_from_url(self, url, defaults):
scheme, host, port, user, password, path, query = _parse_url(url)
connparams = dict(
defaults, **dictfilter({
'host': host, 'port': port, 'password': password,
'db': query.pop('virtual_host', None)})
)

if scheme == 'socket':
# use 'path' as path to the socket… in this case
# the database number should be given in 'query'
connparams.update({
'connection_class': self.redis.UnixDomainSocketConnection,
'path': '/' + path,
})
# host+port are invalid options when using this connection type.
connparams.pop('host', None)
connparams.pop('port', None)
else:
connparams['db'] = path

# db may be string and start with / like in kombu.
db = connparams.get('db') or 0
db = db.strip('/') if isinstance(db, string_t) else db
connparams['db'] = int(db)

# Query parameters override other parameters
connparams.update(query)
return connparams

def get(self, key):
return self.client.get(key)

def mget(self, keys):
return self.client.mget(keys)

def ensure(self, fun, args, **policy):
retry_policy = dict(self.retry_policy, **policy)
max_retries = retry_policy.get('max_retries')
return retry_over_time(
fun, self.connection_errors, args, {},
partial(self.on_connection_error, max_retries),
**retry_policy
)

def on_connection_error(self, max_retries, exc, intervals, retries):
tts = next(intervals)
error('Connection to Redis lost: Retry (%s/%s) %s.',
retries, max_retries or 'Inf',
humanize_seconds(tts, 'in '))
return tts

def set(self, key, value, **retry_policy):
return self.ensure(self._set, (key, value), **retry_policy)

def _set(self, key, value):
with self.client.pipeline() as pipe:
if self.expires:
pipe.setex(key, value, self.expires)
else:
pipe.set(key, value)
pipe.publish(key, value)
pipe.execute()

def delete(self, key):
self.client.delete(key)

def incr(self, key):
return self.client.incr(key)

def expire(self, key, value):
return self.client.expire(key, value)

def _unpack_chord_result(self, tup, decode,
EXCEPTION_STATES=states.EXCEPTION_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES):
_, tid, state, retval = decode(tup)
if state in EXCEPTION_STATES:
retval = self.exception_to_python(retval)
if state in PROPAGATE_STATES:
raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
return retval

def _new_chord_apply(self, header, partial_args, group_id, body,
result=None, **options):
# avoids saving the group in the redis db.
return header(*partial_args, task_id=group_id)

def _new_chord_return(self, task, state, result, propagate=None,
PROPAGATE_STATES=states.PROPAGATE_STATES):
app = self.app
if propagate is None:
propagate = self.app.conf.CELERY_CHORD_PROPAGATES
request = task.request
tid, gid = request.id, request.group
if not gid or not tid:
return

client = self.client
jkey = self.get_key_for_group(gid, '.j')
result = self.encode_result(result, state)
with client.pipeline() as pipe:
_, readycount, _ = pipe \
.rpush(jkey, self.encode([1, tid, state, result])) \
.llen(jkey) \
.expire(jkey, 86400) \
.execute()

try:
callback = maybe_signature(request.chord, app=app)
total = callback['chord_size']
if readycount == total:
decode, unpack = self.decode, self._unpack_chord_result
with client.pipeline() as pipe:
resl, _, = pipe \
.lrange(jkey, 0, total) \
.delete(jkey) \
.execute()
try:
callback.delay([unpack(tup, decode) for tup in resl])
except Exception as exc:
error('Chord callback for %r raised: %r',
request.group, exc, exc_info=1)
return self.chord_error_from_stack(
callback,
ChordError('Callback error: {0!r}'.format(exc)),
)
except ChordError as exc:
error('Chord %r raised: %r', request.group, exc, exc_info=1)
return self.chord_error_from_stack(callback, exc)
except Exception as exc:
error('Chord %r raised: %r', request.group, exc, exc_info=1)
return self.chord_error_from_stack(
callback, ChordError('Join error: {0!r}'.format(exc)),
)

def _detect_client_capabilities(self, socket_connect_timeout=False):
if self.redis.VERSION < (2, 4, 4):
raise ImproperlyConfigured(
'Redis backend requires redis-py version 2.4.4 or later. '
'You have {0.__version__}'.format(redis))
if self.redis.VERSION >= (2, 10):
socket_connect_timeout = True
return {'socket_connect_timeout': socket_connect_timeout}

def _create_client(self, socket_timeout=None, socket_connect_timeout=None,
**params):
return self._new_redis_client(
socket_timeout=socket_timeout and float(socket_timeout),
socket_connect_timeout=socket_connect_timeout and float(
socket_connect_timeout), **params
)

def _new_redis_client(self, **params):
if not self._client_capabilities['socket_connect_timeout']:
params.pop('socket_connect_timeout', None)
return self.redis.Redis(connection_pool=self.ConnectionPool(**params))

@property
def ConnectionPool(self):
if self._ConnectionPool is None:
self._ConnectionPool = self.redis.ConnectionPool
return self._ConnectionPool

@cached_property
def client(self):
return self._create_client(**self.connparams)

def __reduce__(self, args=(), kwargs={}):
return super(RedisBackend, self).__reduce__(
(self.url, ), {'expires': self.expires},
)

@deprecated_property(3.2, 3.3)
def host(self):
return self.connparams['host']

@deprecated_property(3.2, 3.3)
def port(self):
return self.connparams['port']

@deprecated_property(3.2, 3.3)
def db(self):
return self.connparams['db']

@deprecated_property(3.2, 3.3)
def password(self):
return self.connparams['password']

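Everything this backend needs can be packed into a single connection URL; _params_from_url() fills host, port, db and password from it, and query parameters (including the new_join chord switch popped in __init__) override the defaults. A minimal sketch with illustrative values:

    # settings.py - hypothetical values
    CELERY_RESULT_BACKEND = 'redis://:s3cret@localhost:6379/1?new_join=1'
    CELERY_TASK_RESULT_EXPIRES = 3600  # seconds; passed through prepare_expires()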
+ 0  - 67  thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py

# -*- coding: utf-8 -*-
"""
celery.backends.rpc
~~~~~~~~~~~~~~~~~~~

RPC-style result backend, using reply-to and one queue per client.

"""
from __future__ import absolute_import

from kombu import Consumer, Exchange
from kombu.common import maybe_declare
from kombu.utils import cached_property

from celery import current_task
from celery.backends import amqp

__all__ = ['RPCBackend']


class RPCBackend(amqp.AMQPBackend):
persistent = False

class Consumer(Consumer):
auto_declare = False

def _create_exchange(self, name, type='direct', delivery_mode=2):
# uses direct to queue routing (anon exchange).
return Exchange(None)

def on_task_call(self, producer, task_id):
maybe_declare(self.binding(producer.channel), retry=True)

def _create_binding(self, task_id):
return self.binding

def _many_bindings(self, ids):
return [self.binding]

def rkey(self, task_id):
return task_id

def destination_for(self, task_id, request):
# Request is a new argument for backends, so must still support
# old code that relies on current_task
try:
request = request or current_task.request
except AttributeError:
raise RuntimeError(
'RPC backend missing task request for {0!r}'.format(task_id),
)
return request.reply_to, request.correlation_id or task_id

def on_reply_declare(self, task_id):
pass

def as_uri(self, include_password=True):
return 'rpc://'

@property
def binding(self):
return self.Queue(self.oid, self.exchange, self.oid,
durable=False, auto_delete=False)

@cached_property
def oid(self):
return self.app.oid

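Because this backend routes results back over the broker using a per-client reply queue, selecting it is just a matter of pointing the result backend at the rpc:// scheme. A minimal sketch:

    # settings.py
    CELERY_RESULT_BACKEND = 'rpc://'
    CELERY_RESULT_PERSISTENT = False  # mirrors the persistent = False default above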
+ 0  - 571  thesisenv/lib/python3.6/site-packages/celery/beat.py

# -*- coding: utf-8 -*-
"""
celery.beat
~~~~~~~~~~~

The periodic task scheduler.

"""
from __future__ import absolute_import

import errno
import os
import time
import shelve
import sys
import traceback

from threading import Event, Thread

from billiard import ensure_multiprocessing
from billiard.process import Process
from billiard.common import reset_signals
from kombu.utils import cached_property, reprcall
from kombu.utils.functional import maybe_evaluate

from . import __version__
from . import platforms
from . import signals
from .five import items, reraise, values, monotonic
from .schedules import maybe_schedule, crontab
from .utils.imports import instantiate
from .utils.timeutils import humanize_seconds
from .utils.log import get_logger, iter_open_logger_fds

__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler',
'PersistentScheduler', 'Service', 'EmbeddedService']

logger = get_logger(__name__)
debug, info, error, warning = (logger.debug, logger.info,
logger.error, logger.warning)

DEFAULT_MAX_INTERVAL = 300 # 5 minutes


class SchedulingError(Exception):
"""An error occured while scheduling a task."""


class ScheduleEntry(object):
"""An entry in the scheduler.

:keyword name: see :attr:`name`.
:keyword schedule: see :attr:`schedule`.
:keyword args: see :attr:`args`.
:keyword kwargs: see :attr:`kwargs`.
:keyword options: see :attr:`options`.
:keyword last_run_at: see :attr:`last_run_at`.
:keyword total_run_count: see :attr:`total_run_count`.
:keyword relative: Is the time relative to when the server starts?

"""

#: The task name
name = None

#: The schedule (run_every/crontab)
schedule = None

#: Positional arguments to apply.
args = None

#: Keyword arguments to apply.
kwargs = None

#: Task execution options.
options = None

#: The time and date of when this task was last scheduled.
last_run_at = None

#: Total number of times this task has been scheduled.
total_run_count = 0

def __init__(self, name=None, task=None, last_run_at=None,
total_run_count=None, schedule=None, args=(), kwargs={},
options={}, relative=False, app=None):
self.app = app
self.name = name
self.task = task
self.args = args
self.kwargs = kwargs
self.options = options
self.schedule = maybe_schedule(schedule, relative, app=self.app)
self.last_run_at = last_run_at or self._default_now()
self.total_run_count = total_run_count or 0

def _default_now(self):
return self.schedule.now() if self.schedule else self.app.now()

def _next_instance(self, last_run_at=None):
"""Return a new instance of the same class, but with
its date and count fields updated."""
return self.__class__(**dict(
self,
last_run_at=last_run_at or self._default_now(),
total_run_count=self.total_run_count + 1,
))
__next__ = next = _next_instance # for 2to3

def __reduce__(self):
return self.__class__, (
self.name, self.task, self.last_run_at, self.total_run_count,
self.schedule, self.args, self.kwargs, self.options,
)

def update(self, other):
"""Update values from another entry.

Only the "editable" fields are updated (task, schedule, args,
kwargs, options).

"""
self.__dict__.update({'task': other.task, 'schedule': other.schedule,
'args': other.args, 'kwargs': other.kwargs,
'options': other.options})

def is_due(self):
"""See :meth:`~celery.schedule.schedule.is_due`."""
return self.schedule.is_due(self.last_run_at)

def __iter__(self):
return iter(items(vars(self)))

def __repr__(self):
return '<Entry: {0.name} {call} {0.schedule}'.format(
self,
call=reprcall(self.task, self.args or (), self.kwargs or {}),
)


class Scheduler(object):
"""Scheduler for periodic tasks.

The :program:`celery beat` program may instantiate this class
multiple times for introspection purposes, but then with the
``lazy`` argument set. It is important for subclasses to
be idempotent when this argument is set.

:keyword schedule: see :attr:`schedule`.
:keyword max_interval: see :attr:`max_interval`.
:keyword lazy: Do not set up the schedule.

"""
Entry = ScheduleEntry

#: The schedule dict/shelve.
schedule = None

#: Maximum time to sleep between re-checking the schedule.
max_interval = DEFAULT_MAX_INTERVAL

#: How often to sync the schedule (3 minutes by default)
sync_every = 3 * 60

#: How many tasks can be called before a sync is forced.
sync_every_tasks = None

_last_sync = None
_tasks_since_sync = 0

logger = logger # compat

def __init__(self, app, schedule=None, max_interval=None,
Publisher=None, lazy=False, sync_every_tasks=None, **kwargs):
self.app = app
self.data = maybe_evaluate({} if schedule is None else schedule)
self.max_interval = (max_interval or
app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or
self.max_interval)
self.sync_every_tasks = (
app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
else sync_every_tasks)
self.Publisher = Publisher or app.amqp.TaskProducer
if not lazy:
self.setup_schedule()

def install_default_entries(self, data):
entries = {}
if self.app.conf.CELERY_TASK_RESULT_EXPIRES and \
not self.app.backend.supports_autoexpire:
if 'celery.backend_cleanup' not in data:
entries['celery.backend_cleanup'] = {
'task': 'celery.backend_cleanup',
'schedule': crontab('0', '4', '*'),
'options': {'expires': 12 * 3600}}
self.update_from_dict(entries)

def maybe_due(self, entry, publisher=None):
is_due, next_time_to_run = entry.is_due()

if is_due:
info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
try:
result = self.apply_async(entry, publisher=publisher)
except Exception as exc:
error('Message Error: %s\n%s',
exc, traceback.format_stack(), exc_info=True)
else:
debug('%s sent. id->%s', entry.task, result.id)
return next_time_to_run

def tick(self):
"""Run a tick, that is one iteration of the scheduler.

Executes all due tasks.

"""
remaining_times = []
try:
for entry in values(self.schedule):
next_time_to_run = self.maybe_due(entry, self.publisher)
if next_time_to_run:
remaining_times.append(next_time_to_run)
except RuntimeError:
pass

return min(remaining_times + [self.max_interval])

def should_sync(self):
return (
(not self._last_sync or
(monotonic() - self._last_sync) > self.sync_every) or
(self.sync_every_tasks and
self._tasks_since_sync >= self.sync_every_tasks)
)

def reserve(self, entry):
new_entry = self.schedule[entry.name] = next(entry)
return new_entry

def apply_async(self, entry, publisher=None, **kwargs):
# Update timestamps and run counts before we actually execute,
# so we have that done if an exception is raised (doesn't schedule
# forever.)
entry = self.reserve(entry)
task = self.app.tasks.get(entry.task)

try:
if task:
result = task.apply_async(entry.args, entry.kwargs,
publisher=publisher,
**entry.options)
else:
result = self.send_task(entry.task, entry.args, entry.kwargs,
publisher=publisher,
**entry.options)
except Exception as exc:
reraise(SchedulingError, SchedulingError(
"Couldn't apply scheduled task {0.name}: {exc}".format(
entry, exc=exc)), sys.exc_info()[2])
finally:
self._tasks_since_sync += 1
if self.should_sync():
self._do_sync()
return result

def send_task(self, *args, **kwargs):
return self.app.send_task(*args, **kwargs)

def setup_schedule(self):
self.install_default_entries(self.data)

def _do_sync(self):
try:
debug('beat: Synchronizing schedule...')
self.sync()
finally:
self._last_sync = monotonic()
self._tasks_since_sync = 0

def sync(self):
pass

def close(self):
self.sync()

def add(self, **kwargs):
entry = self.Entry(app=self.app, **kwargs)
self.schedule[entry.name] = entry
return entry

def _maybe_entry(self, name, entry):
if isinstance(entry, self.Entry):
entry.app = self.app
return entry
return self.Entry(**dict(entry, name=name, app=self.app))

def update_from_dict(self, dict_):
self.schedule.update(dict(
(name, self._maybe_entry(name, entry))
for name, entry in items(dict_)))

def merge_inplace(self, b):
schedule = self.schedule
A, B = set(schedule), set(b)

# Remove items from disk not in the schedule anymore.
for key in A ^ B:
schedule.pop(key, None)

# Update and add new items in the schedule
for key in B:
entry = self.Entry(**dict(b[key], name=key, app=self.app))
if schedule.get(key):
schedule[key].update(entry)
else:
schedule[key] = entry

def _ensure_connected(self):
# callback called for each retry while the connection
# can't be established.
def _error_handler(exc, interval):
error('beat: Connection error: %s. '
'Trying again in %s seconds...', exc, interval)

return self.connection.ensure_connection(
_error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES
)

def get_schedule(self):
return self.data

def set_schedule(self, schedule):
self.data = schedule
schedule = property(get_schedule, set_schedule)

@cached_property
def connection(self):
return self.app.connection()

@cached_property
def publisher(self):
return self.Publisher(self._ensure_connected())

@property
def info(self):
return ''


class PersistentScheduler(Scheduler):
persistence = shelve
known_suffixes = ('', '.db', '.dat', '.bak', '.dir')

_store = None

def __init__(self, *args, **kwargs):
self.schedule_filename = kwargs.get('schedule_filename')
Scheduler.__init__(self, *args, **kwargs)

def _remove_db(self):
for suffix in self.known_suffixes:
with platforms.ignore_errno(errno.ENOENT):
os.remove(self.schedule_filename + suffix)

def _open_schedule(self):
return self.persistence.open(self.schedule_filename, writeback=True)

def _destroy_open_corrupted_schedule(self, exc):
error('Removing corrupted schedule file %r: %r',
self.schedule_filename, exc, exc_info=True)
self._remove_db()
return self._open_schedule()

def setup_schedule(self):
try:
self._store = self._open_schedule()
# Different storage backends may raise different errors for
# corrupted files, e.g. a DBPageNotFoundError from bsddb. In such
# cases the file opens successfully but the error is raised on the
# first key access.
self._store.keys()
except Exception as exc:
self._store = self._destroy_open_corrupted_schedule(exc)

for _ in (1, 2):
try:
self._store['entries']
except KeyError:
# new schedule db
try:
self._store['entries'] = {}
except KeyError as exc:
self._store = self._destroy_open_corrupted_schedule(exc)
continue
else:
if '__version__' not in self._store:
warning('DB Reset: Account for new __version__ field')
self._store.clear() # remove schedule at 2.2.2 upgrade.
elif 'tz' not in self._store:
warning('DB Reset: Account for new tz field')
self._store.clear() # remove schedule at 3.0.8 upgrade
elif 'utc_enabled' not in self._store:
warning('DB Reset: Account for new utc_enabled field')
self._store.clear() # remove schedule at 3.0.9 upgrade
break

tz = self.app.conf.CELERY_TIMEZONE
stored_tz = self._store.get('tz')
if stored_tz is not None and stored_tz != tz:
warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
self._store.clear() # Timezone changed, reset db!
utc = self.app.conf.CELERY_ENABLE_UTC
stored_utc = self._store.get('utc_enabled')
if stored_utc is not None and stored_utc != utc:
choices = {True: 'enabled', False: 'disabled'}
warning('Reset: UTC changed from %s to %s',
choices[stored_utc], choices[utc])
self._store.clear() # UTC setting changed, reset db!
entries = self._store.setdefault('entries', {})
self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
self.install_default_entries(self.schedule)
self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
self.sync()
debug('Current schedule:\n' + '\n'.join(
repr(entry) for entry in values(entries)))

def get_schedule(self):
return self._store['entries']

def set_schedule(self, schedule):
self._store['entries'] = schedule
schedule = property(get_schedule, set_schedule)

def sync(self):
if self._store is not None:
self._store.sync()

def close(self):
self.sync()
self._store.close()

@property
def info(self):
return ' . db -> {self.schedule_filename}'.format(self=self)


class Service(object):
scheduler_cls = PersistentScheduler

def __init__(self, app, max_interval=None, schedule_filename=None,
scheduler_cls=None):
self.app = app
self.max_interval = (max_interval or
app.conf.CELERYBEAT_MAX_LOOP_INTERVAL)
self.scheduler_cls = scheduler_cls or self.scheduler_cls
self.schedule_filename = (
schedule_filename or app.conf.CELERYBEAT_SCHEDULE_FILENAME)

self._is_shutdown = Event()
self._is_stopped = Event()

def __reduce__(self):
return self.__class__, (self.max_interval, self.schedule_filename,
self.scheduler_cls, self.app)

def start(self, embedded_process=False, drift=-0.010):
info('beat: Starting...')
debug('beat: Ticking with max interval->%s',
humanize_seconds(self.scheduler.max_interval))

signals.beat_init.send(sender=self)
if embedded_process:
signals.beat_embedded_init.send(sender=self)
platforms.set_process_title('celery beat')

try:
while not self._is_shutdown.is_set():
interval = self.scheduler.tick()
interval = interval + drift if interval else interval
if interval and interval > 0:
debug('beat: Waking up %s.',
humanize_seconds(interval, prefix='in '))
time.sleep(interval)
if self.scheduler.should_sync():
self.scheduler._do_sync()
except (KeyboardInterrupt, SystemExit):
self._is_shutdown.set()
finally:
self.sync()

def sync(self):
self.scheduler.close()
self._is_stopped.set()

def stop(self, wait=False):
info('beat: Shutting down...')
self._is_shutdown.set()
wait and self._is_stopped.wait() # block until shutdown done.

def get_scheduler(self, lazy=False):
filename = self.schedule_filename
scheduler = instantiate(self.scheduler_cls,
app=self.app,
schedule_filename=filename,
max_interval=self.max_interval,
lazy=lazy)
return scheduler

@cached_property
def scheduler(self):
return self.get_scheduler()


class _Threaded(Thread):
"""Embedded task scheduler using threading."""

def __init__(self, app, **kwargs):
super(_Threaded, self).__init__()
self.app = app
self.service = Service(app, **kwargs)
self.daemon = True
self.name = 'Beat'

def run(self):
self.app.set_current()
self.service.start()

def stop(self):
self.service.stop(wait=True)


try:
ensure_multiprocessing()
except NotImplementedError: # pragma: no cover
_Process = None
else:
class _Process(Process): # noqa

def __init__(self, app, **kwargs):
super(_Process, self).__init__()
self.app = app
self.service = Service(app, **kwargs)
self.name = 'Beat'

def run(self):
reset_signals(full=False)
platforms.close_open_fds([
sys.__stdin__, sys.__stdout__, sys.__stderr__,
] + list(iter_open_logger_fds()))
self.app.set_default()
self.app.set_current()
self.service.start(embedded_process=True)

def stop(self):
self.service.stop()
self.terminate()


def EmbeddedService(app, max_interval=None, **kwargs):
"""Return embedded clock service.

:keyword thread: Run threaded instead of as a separate process.
Uses :mod:`multiprocessing` by default, if available.

"""
if kwargs.pop('thread', False) or _Process is None:
# Need short max interval to be able to stop thread
# in reasonable time.
return _Threaded(app, max_interval=1, **kwargs)
return _Process(app, max_interval=max_interval, **kwargs)

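The schedulers above are populated from the CELERYBEAT_SCHEDULE setting: PersistentScheduler.setup_schedule() merges it into the shelve file and each item becomes a ScheduleEntry. A minimal sketch of such a schedule (entry name and timing are illustrative assumptions):

    # settings.py
    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'cleanup-every-morning': {
            'task': 'celery.backend_cleanup',  # same task install_default_entries() registers
            'schedule': crontab(minute=0, hour=4),
            'options': {'expires': 12 * 3600},
        },
    }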
+ 0  - 5  thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py

from __future__ import absolute_import

from .base import Option

__all__ = ['Option']

+ 0  - 380  thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py

# -*- coding: utf-8 -*-
"""
The :program:`celery amqp` command.

.. program:: celery amqp

"""
from __future__ import absolute_import, print_function, unicode_literals

import cmd
import sys
import shlex
import pprint

from functools import partial
from itertools import count

from kombu.utils.encoding import safe_str

from celery.utils.functional import padlist

from celery.bin.base import Command
from celery.five import string_t
from celery.utils import strtobool

__all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp']

# Map to coerce strings to other types.
COERCE = {bool: strtobool}

HELP_HEADER = """
Commands
--------
""".rstrip()

EXAMPLE_TEXT = """
Example:
-> queue.delete myqueue yes no
"""

say = partial(print, file=sys.stderr)


class Spec(object):
"""AMQP Command specification.

Used to convert arguments to Python values and display various help
and tooltips.

:param args: see :attr:`args`.
:keyword returns: see :attr:`returns`.

.. attribute:: args

List of arguments this command takes. Should
contain `(argument_name, argument_type)` tuples.

.. attribute:: returns

Helpful human string representation of what this command returns.
May be :const:`None`, to signify the return type is unknown.

"""
def __init__(self, *args, **kwargs):
self.args = args
self.returns = kwargs.get('returns')

def coerce(self, index, value):
"""Coerce value for argument at index."""
arg_info = self.args[index]
arg_type = arg_info[1]
# Might be a custom way to coerce the string value,
# so look in the coercion map.
return COERCE.get(arg_type, arg_type)(value)

def str_args_to_python(self, arglist):
"""Process list of string arguments to values according to spec.

e.g:

>>> spec = Spec(('queue', str), ('if_unused', bool))
>>> spec.str_args_to_python(['pobox', 'true'])
('pobox', True)

"""
return tuple(
self.coerce(index, value) for index, value in enumerate(arglist))

def format_response(self, response):
"""Format the return value of this command in a human-friendly way."""
if not self.returns:
return 'ok.' if response is None else response
if callable(self.returns):
return self.returns(response)
return self.returns.format(response)

def format_arg(self, name, type, default_value=None):
if default_value is not None:
return '{0}:{1}'.format(name, default_value)
return name

def format_signature(self):
return ' '.join(self.format_arg(*padlist(list(arg), 3))
for arg in self.args)


def dump_message(message):
if message is None:
return 'No messages in queue. basic.publish something.'
return {'body': message.body,
'properties': message.properties,
'delivery_info': message.delivery_info}


def format_declare_queue(ret):
return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret)


class AMQShell(cmd.Cmd):
"""AMQP API Shell.

:keyword connect: Function used to connect to the server, must return
connection object.

:keyword silent: If :const:`True`, the commands won't have annoying
output not relevant when running in non-shell mode.


.. attribute:: builtins

Mapping of built-in command names -> method names

.. attribute:: amqp

Mapping of AMQP API commands and their :class:`Spec`.

"""
conn = None
chan = None
prompt_fmt = '{self.counter}> '
identchars = cmd.IDENTCHARS = '.'
needs_reconnect = False
counter = 1
inc_counter = count(2)

builtins = {'EOF': 'do_exit',
'exit': 'do_exit',
'help': 'do_help'}

amqp = {
'exchange.declare': Spec(('exchange', str),
('type', str),
('passive', bool, 'no'),
('durable', bool, 'no'),
('auto_delete', bool, 'no'),
('internal', bool, 'no')),
'exchange.delete': Spec(('exchange', str),
('if_unused', bool)),
'queue.bind': Spec(('queue', str),
('exchange', str),
('routing_key', str)),
'queue.declare': Spec(('queue', str),
('passive', bool, 'no'),
('durable', bool, 'no'),
('exclusive', bool, 'no'),
('auto_delete', bool, 'no'),
returns=format_declare_queue),
'queue.delete': Spec(('queue', str),
('if_unused', bool, 'no'),
('if_empty', bool, 'no'),
returns='ok. {0} messages deleted.'),
'queue.purge': Spec(('queue', str),
returns='ok. {0} messages deleted.'),
'basic.get': Spec(('queue', str),
('no_ack', bool, 'off'),
returns=dump_message),
'basic.publish': Spec(('msg', str),
('exchange', str),
('routing_key', str),
('mandatory', bool, 'no'),
('immediate', bool, 'no')),
'basic.ack': Spec(('delivery_tag', int)),
}

def _prepare_spec(self, conn):
# XXX Hack to fix Issue #2013
from amqp import Connection, Message
if isinstance(conn.connection, Connection):
self.amqp['basic.publish'] = Spec(('msg', Message),
('exchange', str),
('routing_key', str),
('mandatory', bool, 'no'),
('immediate', bool, 'no'))

def __init__(self, *args, **kwargs):
self.connect = kwargs.pop('connect')
self.silent = kwargs.pop('silent', False)
self.out = kwargs.pop('out', sys.stderr)
cmd.Cmd.__init__(self, *args, **kwargs)
self._reconnect()

def note(self, m):
"""Say something to the user. Disabled if :attr:`silent`."""
if not self.silent:
say(m, file=self.out)

def say(self, m):
say(m, file=self.out)

def get_amqp_api_command(self, cmd, arglist):
"""With a command name and a list of arguments, convert the arguments
to Python values and find the corresponding method on the AMQP channel
object.

:returns: tuple of `(method, processed_args)`.

"""
spec = self.amqp[cmd]
args = spec.str_args_to_python(arglist)
attr_name = cmd.replace('.', '_')
if self.needs_reconnect:
self._reconnect()
return getattr(self.chan, attr_name), args, spec.format_response

def do_exit(self, *args):
"""The `'exit'` command."""
self.note("\n-> please, don't leave!")
sys.exit(0)

def display_command_help(self, cmd, short=False):
spec = self.amqp[cmd]
self.say('{0} {1}'.format(cmd, spec.format_signature()))

def do_help(self, *args):
if not args:
self.say(HELP_HEADER)
for cmd_name in self.amqp:
self.display_command_help(cmd_name, short=True)
self.say(EXAMPLE_TEXT)
else:
self.display_command_help(args[0])

def default(self, line):
self.say("unknown syntax: {0!r}. how about some 'help'?".format(line))

def get_names(self):
return set(self.builtins) | set(self.amqp)

def completenames(self, text, *ignored):
"""Return all commands starting with `text`, for tab-completion."""
names = self.get_names()
first = [cmd for cmd in names
if cmd.startswith(text.replace('_', '.'))]
if first:
return first
return [cmd for cmd in names
if cmd.partition('.')[2].startswith(text)]

def dispatch(self, cmd, argline):
"""Dispatch and execute the command.

Lookup order is: :attr:`builtins` -> :attr:`amqp`.

"""
arglist = shlex.split(safe_str(argline))
if cmd in self.builtins:
return getattr(self, self.builtins[cmd])(*arglist)
fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
return formatter(fun(*args))

def parseline(self, line):
"""Parse input line.

:returns: tuple of three items:
`(command_name, arglist, original_line)`

"""
parts = line.split()
if parts:
return parts[0], ' '.join(parts[1:]), line
return '', '', line

def onecmd(self, line):
"""Parse line and execute command."""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
self.lastcmd = line
self.counter = next(self.inc_counter)
try:
self.respond(self.dispatch(cmd, arg))
except (AttributeError, KeyError) as exc:
self.default(line)
except Exception as exc:
self.say(exc)
self.needs_reconnect = True

def respond(self, retval):
"""What to do with the return value of a command."""
if retval is not None:
if isinstance(retval, string_t):
self.say(retval)
else:
self.say(pprint.pformat(retval))

def _reconnect(self):
"""Re-establish connection to the AMQP server."""
self.conn = self.connect(self.conn)
self._prepare_spec(self.conn)
self.chan = self.conn.default_channel
self.needs_reconnect = False

@property
def prompt(self):
return self.prompt_fmt.format(self=self)


class AMQPAdmin(object):
"""The celery :program:`celery amqp` utility."""
Shell = AMQShell

def __init__(self, *args, **kwargs):
self.app = kwargs['app']
self.out = kwargs.setdefault('out', sys.stderr)
self.silent = kwargs.get('silent')
self.args = args

def connect(self, conn=None):
if conn:
conn.close()
conn = self.app.connection()
self.note('-> connecting to {0}.'.format(conn.as_uri()))
conn.connect()
self.note('-> connected.')
return conn

def run(self):
shell = self.Shell(connect=self.connect, out=self.out)
if self.args:
return shell.onecmd(' '.join(self.args))
try:
return shell.cmdloop()
except KeyboardInterrupt:
self.note('(bibi)')
pass

def note(self, m):
if not self.silent:
say(m, file=self.out)


class amqp(Command):
"""AMQP Administration Shell.

Also works for non-amqp transports (but not ones that
store declarations in memory).

Examples::

celery amqp
start shell mode
celery amqp help
show list of commands

celery amqp exchange.delete name
celery amqp queue.delete queue
celery amqp queue.delete queue yes yes

"""

def run(self, *args, **options):
options['app'] = self.app
return AMQPAdmin(*args, **options).run()


def main():
amqp().execute_from_commandline()

if __name__ == '__main__': # pragma: no cover
main()

+ 0  - 668  thesisenv/lib/python3.6/site-packages/celery/bin/base.py

# -*- coding: utf-8 -*-
"""

.. _preload-options:

Preload Options
---------------

These options are supported by all commands,
and usually parsed before command-specific arguments.

.. cmdoption:: -A, --app

app instance to use (e.g. module.attr_name)

.. cmdoption:: -b, --broker

url to broker. default is 'amqp://guest@localhost//'

.. cmdoption:: --loader

name of custom loader class to use.

.. cmdoption:: --config

Name of the configuration module

.. _daemon-options:

Daemon Options
--------------

These options are supported by commands that can detach
into the background (daemon). They will be present
in any command that also has a `--detach` option.

.. cmdoption:: -f, --logfile

Path to log file. If no logfile is specified, `stderr` is used.

.. cmdoption:: --pidfile

Optional file used to store the process pid.

The program will not start if this file already exists
and the pid is still alive.

.. cmdoption:: --uid

User id, or user name of the user to run as after detaching.

.. cmdoption:: --gid

Group id, or group name of the main group to change to after
detaching.

.. cmdoption:: --umask

Effective umask (in octal) of the process after detaching. Inherits
the umask of the parent process by default.

.. cmdoption:: --workdir

Optional directory to change to after detaching.

.. cmdoption:: --executable

Executable to use for the detached process.

"""
from __future__ import absolute_import, print_function, unicode_literals

import os
import random
import re
import sys
import warnings
import json

from collections import defaultdict
from heapq import heappush
from inspect import getargspec
from optparse import OptionParser, IndentedHelpFormatter, make_option as Option
from pprint import pformat

from celery import VERSION_BANNER, Celery, maybe_patch_concurrency
from celery import signals
from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
from celery.five import items, string, string_t
from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
from celery.utils import term
from celery.utils import text
from celery.utils import node_format, host_format
from celery.utils.imports import symbol_by_name, import_from_cwd

try:
input = raw_input
except NameError:
pass

# always enable DeprecationWarnings, so our users can see them.
for warning in (CDeprecationWarning, CPendingDeprecationWarning):
warnings.simplefilter('once', warning, 0)

ARGV_DISABLED = """
Unrecognized command-line arguments: {0}

Try --help?
"""

find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)')
find_rst_ref = re.compile(r':\w+:`(.+?)`')

__all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter',
'Command', 'Option', 'daemon_options']


class Error(Exception):
status = EX_FAILURE

def __init__(self, reason, status=None):
self.reason = reason
self.status = status if status is not None else self.status
super(Error, self).__init__(reason, status)

def __str__(self):
return self.reason
__unicode__ = __str__


class UsageError(Error):
status = EX_USAGE


class Extensions(object):

def __init__(self, namespace, register):
self.names = []
self.namespace = namespace
self.register = register

def add(self, cls, name):
heappush(self.names, name)
self.register(cls, name=name)

def load(self):
try:
from pkg_resources import iter_entry_points
except ImportError: # pragma: no cover
return

for ep in iter_entry_points(self.namespace):
sym = ':'.join([ep.module_name, ep.attrs[0]])
try:
cls = symbol_by_name(sym)
except (ImportError, SyntaxError) as exc:
warnings.warn(
'Cannot load extension {0!r}: {1!r}'.format(sym, exc))
else:
self.add(cls, ep.name)
return self.names


class HelpFormatter(IndentedHelpFormatter):

def format_epilog(self, epilog):
if epilog:
return '\n{0}\n\n'.format(epilog)
return ''

def format_description(self, description):
return text.ensure_2lines(text.fill_paragraphs(
text.dedent(description), self.width))


class Command(object):
"""Base class for command-line applications.

:keyword app: The current app.
:keyword get_app: Callable returning the current app if no app provided.

"""
Error = Error
UsageError = UsageError
Parser = OptionParser

#: Arg list used in help.
args = ''

#: Application version.
version = VERSION_BANNER

#: If false the parser will raise an exception if positional
#: args are provided.
supports_args = True

#: List of options (without preload options).
option_list = ()

# module Rst documentation to parse help from (if any)
doc = None

# Some programs (multi) do not want to load the app specified
# (Issue #1008).
respects_app_option = True

#: List of options to parse before parsing other options.
preload_options = (
Option('-A', '--app', default=None),
Option('-b', '--broker', default=None),
Option('--loader', default=None),
Option('--config', default=None),
Option('--workdir', default=None, dest='working_directory'),
Option('--no-color', '-C', action='store_true', default=None),
Option('--quiet', '-q', action='store_true'),
)

#: Enable if the application should support config from the cmdline.
enable_config_from_cmdline = False

#: Default configuration namespace.
namespace = 'celery'

#: Text to print at end of --help
epilog = None

#: Text to print in --help before option list.
description = ''

#: Set to true if this command doesn't have subcommands
leaf = True

# used by :meth:`say_remote_command_reply`.
show_body = True
# used by :meth:`say_chat`.
show_reply = True

prog_name = 'celery'

def __init__(self, app=None, get_app=None, no_color=False,
stdout=None, stderr=None, quiet=False, on_error=None,
on_usage_error=None):
self.app = app
self.get_app = get_app or self._get_default_app
self.stdout = stdout or sys.stdout
self.stderr = stderr or sys.stderr
self._colored = None
self._no_color = no_color
self.quiet = quiet
if not self.description:
self.description = self.__doc__
if on_error:
self.on_error = on_error
if on_usage_error:
self.on_usage_error = on_usage_error

def run(self, *args, **options):
"""This is the body of the command called by :meth:`handle_argv`."""
raise NotImplementedError('subclass responsibility')

def on_error(self, exc):
self.error(self.colored.red('Error: {0}'.format(exc)))

def on_usage_error(self, exc):
self.handle_error(exc)

def on_concurrency_setup(self):
pass

def __call__(self, *args, **kwargs):
random.seed() # maybe we were forked.
self.verify_args(args)
try:
ret = self.run(*args, **kwargs)
return ret if ret is not None else EX_OK
except self.UsageError as exc:
self.on_usage_error(exc)
return exc.status
except self.Error as exc:
self.on_error(exc)
return exc.status

def verify_args(self, given, _index=0):
S = getargspec(self.run)
_index = 1 if S.args and S.args[0] == 'self' else _index
required = S.args[_index:-len(S.defaults) if S.defaults else None]
missing = required[len(given):]
if missing:
raise self.UsageError('Missing required {0}: {1}'.format(
text.pluralize(len(missing), 'argument'),
', '.join(missing)
))

def execute_from_commandline(self, argv=None):
"""Execute application from command-line.

:keyword argv: The list of command-line arguments.
Defaults to ``sys.argv``.

"""
if argv is None:
argv = list(sys.argv)
# Should we load any special concurrency environment?
self.maybe_patch_concurrency(argv)
self.on_concurrency_setup()

# Dump version and exit if '--version' arg set.
self.early_version(argv)
argv = self.setup_app_from_commandline(argv)
self.prog_name = os.path.basename(argv[0])
return self.handle_argv(self.prog_name, argv[1:])

def run_from_argv(self, prog_name, argv=None, command=None):
return self.handle_argv(prog_name,
sys.argv if argv is None else argv, command)

def maybe_patch_concurrency(self, argv=None):
argv = argv or sys.argv
pool_option = self.with_pool_option(argv)
if pool_option:
maybe_patch_concurrency(argv, *pool_option)
short_opts, long_opts = pool_option

def usage(self, command):
return '%prog {0} [options] {self.args}'.format(command, self=self)

def get_options(self):
"""Get supported command-line options."""
return self.option_list

def expanduser(self, value):
if isinstance(value, string_t):
return os.path.expanduser(value)
return value

def ask(self, q, choices, default=None):
"""Prompt user to choose from a tuple of string values.

:param q: the question to ask (do not include a question mark).
:param choices: tuple of possible choices, must be lowercase.
:param default: Default value if any.

If a default is not specified the question will be repeated
until the user gives a valid choice.

Matching is done case insensitively.

"""
schoices = choices
if default is not None:
schoices = [c.upper() if c == default else c.lower()
for c in choices]
schoices = '/'.join(schoices)

p = '{0} ({1})? '.format(q.capitalize(), schoices)
while 1:
val = input(p).lower()
if val in choices:
return val
elif default is not None:
break
return default

def handle_argv(self, prog_name, argv, command=None):
"""Parse command-line arguments from ``argv`` and dispatch
to :meth:`run`.

:param prog_name: The program name (``argv[0]``).
:param argv: Command arguments.

Exits with an error message if :attr:`supports_args` is disabled
and ``argv`` contains positional arguments.

"""
options, args = self.prepare_args(
*self.parse_options(prog_name, argv, command))
return self(*args, **options)

def prepare_args(self, options, args):
if options:
options = dict((k, self.expanduser(v))
for k, v in items(vars(options))
if not k.startswith('_'))
args = [self.expanduser(arg) for arg in args]
self.check_args(args)
return options, args

def check_args(self, args):
if not self.supports_args and args:
self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE)

def error(self, s):
self.out(s, fh=self.stderr)

def out(self, s, fh=None):
print(s, file=fh or self.stdout)

def die(self, msg, status=EX_FAILURE):
self.error(msg)
sys.exit(status)

def early_version(self, argv):
if '--version' in argv:
print(self.version, file=self.stdout)
sys.exit(0)

def parse_options(self, prog_name, arguments, command=None):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
self.parser = self.create_parser(prog_name, command)
return self.parser.parse_args(arguments)

def create_parser(self, prog_name, command=None):
option_list = (
self.preload_options +
self.get_options() +
tuple(self.app.user_options['preload'])
)
return self.prepare_parser(self.Parser(
prog=prog_name,
usage=self.usage(command),
version=self.version,
epilog=self.epilog,
formatter=HelpFormatter(),
description=self.description,
option_list=option_list,
))

def prepare_parser(self, parser):
docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc]
for doc in docs:
for long_opt, help in items(doc):
option = parser.get_option(long_opt)
if option is not None:
option.help = ' '.join(help).format(default=option.default)
return parser

def setup_app_from_commandline(self, argv):
preload_options = self.parse_preload_options(argv)
quiet = preload_options.get('quiet')
if quiet is not None:
self.quiet = quiet
try:
self.no_color = preload_options['no_color']
except KeyError:
pass
workdir = preload_options.get('working_directory')
if workdir:
os.chdir(workdir)
app = (preload_options.get('app') or
os.environ.get('CELERY_APP') or
self.app)
preload_loader = preload_options.get('loader')
if preload_loader:
# Default app takes loader from this env (Issue #1066).
os.environ['CELERY_LOADER'] = preload_loader
loader = (preload_loader,
os.environ.get('CELERY_LOADER') or
'default')
broker = preload_options.get('broker', None)
if broker:
os.environ['CELERY_BROKER_URL'] = broker
config = preload_options.get('config')
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
if self.respects_app_option:
if app:
self.app = self.find_app(app)
elif self.app is None:
self.app = self.get_app(loader=loader)
if self.enable_config_from_cmdline:
argv = self.process_cmdline_config(argv)
else:
self.app = Celery(fixups=[])

user_preload = tuple(self.app.user_options['preload'] or ())
if user_preload:
user_options = self.preparse_options(argv, user_preload)
for user_option in user_preload:
user_options.setdefault(user_option.dest, user_option.default)
signals.user_preload_options.send(
sender=self, app=self.app, options=user_options,
)
return argv

def find_app(self, app):
from celery.app.utils import find_app
return find_app(app, symbol_by_name=self.symbol_by_name)

def symbol_by_name(self, name, imp=import_from_cwd):
return symbol_by_name(name, imp=imp)
get_cls_by_name = symbol_by_name # XXX compat

def process_cmdline_config(self, argv):
try:
cargs_start = argv.index('--')
except ValueError:
return argv
argv, cargs = argv[:cargs_start], argv[cargs_start + 1:]
self.app.config_from_cmdline(cargs, namespace=self.namespace)
return argv

def parse_preload_options(self, args):
return self.preparse_options(args, self.preload_options)

def add_append_opt(self, acc, opt, value):
acc.setdefault(opt.dest, opt.default or [])
acc[opt.dest].append(value)

def preparse_options(self, args, options):
acc = {}
opts = {}
for opt in options:
for t in (opt._long_opts, opt._short_opts):
opts.update(dict(zip(t, [opt] * len(t))))
index = 0
length = len(args)
while index < length:
arg = args[index]
if arg.startswith('--'):
if '=' in arg:
key, value = arg.split('=', 1)
opt = opts.get(key)
if opt:
if opt.action == 'append':
self.add_append_opt(acc, opt, value)
else:
acc[opt.dest] = value
else:
opt = opts.get(arg)
if opt and opt.takes_value():
# optparse also supports ['--opt', 'value']
# (Issue #1668)
if opt.action == 'append':
self.add_append_opt(acc, opt, args[index + 1])
else:
acc[opt.dest] = args[index + 1]
index += 1
elif opt and opt.action == 'store_true':
acc[opt.dest] = True
elif arg.startswith('-'):
opt = opts.get(arg)
if opt:
if opt.takes_value():
try:
acc[opt.dest] = args[index + 1]
except IndexError:
raise ValueError(
'Missing required argument for {0}'.format(
arg))
index += 1
elif opt.action == 'store_true':
acc[opt.dest] = True
index += 1
return acc

def parse_doc(self, doc):
options, in_option = defaultdict(list), None
for line in doc.splitlines():
if line.startswith('.. cmdoption::'):
m = find_long_opt.match(line)
if m:
in_option = m.groups()[0].strip()
assert in_option, 'missing long opt'
elif in_option and line.startswith(' ' * 4):
options[in_option].append(
find_rst_ref.sub(r'\1', line.strip()).replace('`', ''))
return options

def with_pool_option(self, argv):
"""Return tuple of ``(short_opts, long_opts)`` if the command
supports a pool argument, and is used to monkey patch eventlet/gevent
environments as early as possible.

E.g::
has_pool_option = (['-P'], ['--pool'])
"""
pass

def node_format(self, s, nodename, **extra):
return node_format(s, nodename, **extra)

def host_format(self, s, **extra):
return host_format(s, **extra)

def _get_default_app(self, *args, **kwargs):
from celery._state import get_current_app
return get_current_app() # omit proxy

def pretty_list(self, n):
c = self.colored
if not n:
return '- empty -'
return '\n'.join(
str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n
)

def pretty_dict_ok_error(self, n):
c = self.colored
try:
return (c.green('OK'),
text.indent(self.pretty(n['ok'])[1], 4))
except KeyError:
pass
return (c.red('ERROR'),
text.indent(self.pretty(n['error'])[1], 4))

def say_remote_command_reply(self, replies):
c = self.colored
node = next(iter(replies)) # <-- take first.
reply = replies[node]
status, preply = self.pretty(reply)
self.say_chat('->', c.cyan(node, ': ') + status,
text.indent(preply, 4) if self.show_reply else '')

def pretty(self, n):
OK = str(self.colored.green('OK'))
if isinstance(n, list):
return OK, self.pretty_list(n)
if isinstance(n, dict):
if 'ok' in n or 'error' in n:
return self.pretty_dict_ok_error(n)
else:
return OK, json.dumps(n, sort_keys=True, indent=4)
if isinstance(n, string_t):
return OK, string(n)
return OK, pformat(n)

def say_chat(self, direction, title, body=''):
c = self.colored
if direction == '<-' and self.quiet:
return
dirstr = not self.quiet and c.bold(c.white(direction), ' ') or ''
self.out(c.reset(dirstr, title))
if body and self.show_body:
self.out(body)

@property
def colored(self):
if self._colored is None:
self._colored = term.colored(enabled=not self.no_color)
return self._colored

@colored.setter
def colored(self, obj):
self._colored = obj

@property
def no_color(self):
return self._no_color

@no_color.setter
def no_color(self, value):
self._no_color = value
if self._colored is not None:
self._colored.enabled = not self._no_color


def daemon_options(default_pidfile=None, default_logfile=None):
return (
Option('-f', '--logfile', default=default_logfile),
Option('--pidfile', default=default_pidfile),
Option('--uid', default=None),
Option('--gid', default=None),
Option('--umask', default=None),
Option('--executable', default=None),
)

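Concrete commands build on this class by declaring option_list and implementing run(); execute_from_commandline() then takes care of preload options, app setup, option parsing and exit codes. A minimal sketch of such a subclass (the command itself is hypothetical, not part of Celery):

    from celery.bin.base import Command, Option

    class hello(Command):
        """Print a greeting (illustrative only)."""

        option_list = Command.option_list + (
            Option('--name', default='world'),
        )

        def run(self, name='world', **options):
            self.out('hello, {0}'.format(name))

    if __name__ == '__main__':
        hello().execute_from_commandline()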
+ 0  - 100  thesisenv/lib/python3.6/site-packages/celery/bin/beat.py

# -*- coding: utf-8 -*-
"""

The :program:`celery beat` command.

.. program:: celery beat

.. seealso::

See :ref:`preload-options` and :ref:`daemon-options`.

.. cmdoption:: --detach

Detach and run in the background as a daemon.

.. cmdoption:: -s, --schedule

Path to the schedule database. Defaults to `celerybeat-schedule`.
The extension '.db' may be appended to the filename.
Default is {default}.

.. cmdoption:: -S, --scheduler

Scheduler class to use.
Default is :class:`celery.beat.PersistentScheduler`.

.. cmdoption:: --max-interval

Max seconds to sleep between schedule iterations.

.. cmdoption:: -f, --logfile

Path to log file. If no logfile is specified, `stderr` is used.

.. cmdoption:: -l, --loglevel

Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.

"""
from __future__ import absolute_import

from functools import partial

from celery.platforms import detached, maybe_drop_privileges

from celery.bin.base import Command, Option, daemon_options

__all__ = ['beat']


class beat(Command):
"""Start the beat periodic task scheduler.

Examples::

celery beat -l info
celery beat -s /var/run/celery/beat-schedule --detach
celery beat -S djcelery.schedulers.DatabaseScheduler

"""
doc = __doc__
enable_config_from_cmdline = True
supports_args = False

def run(self, detach=False, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, working_directory=None, **kwargs):
if not detach:
maybe_drop_privileges(uid=uid, gid=gid)
workdir = working_directory
kwargs.pop('app', None)
beat = partial(self.app.Beat,
logfile=logfile, pidfile=pidfile, **kwargs)

if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
return beat().run()
else:
return beat().run()

def get_options(self):
c = self.app.conf

return (
(Option('--detach', action='store_true'),
Option('-s', '--schedule',
default=c.CELERYBEAT_SCHEDULE_FILENAME),
Option('--max-interval', type='float'),
Option('-S', '--scheduler', dest='scheduler_cls'),
Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL)) +
daemon_options(default_pidfile='celerybeat.pid') +
tuple(self.app.user_options['beat'])
)


def main(app=None):
beat(app=app).execute_from_commandline()

if __name__ == '__main__': # pragma: no cover
main()

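Under the hood this command simply instantiates self.app.Beat and calls run() on it, optionally inside a detached() daemon context. Roughly equivalent Python, as a sketch (the import path is a placeholder for whatever module defines the app instance):

    from somewhere import app  # hypothetical import of the Celery app instance

    app.Beat(loglevel='INFO', max_interval=5).run()  # blocks, ticking the scheduler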
+ 0  - 850  thesisenv/lib/python3.6/site-packages/celery/bin/celery.py

# -*- coding: utf-8 -*-
"""

The :program:`celery` umbrella command.

.. program:: celery

"""
from __future__ import absolute_import, unicode_literals

import anyjson
import numbers
import os
import sys

from functools import partial
from importlib import import_module

from celery.five import string_t, values
from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
from celery.utils import term
from celery.utils import text
from celery.utils.timeutils import maybe_iso8601

# Cannot use relative imports here due to a Windows issue (#1111).
from celery.bin.base import Command, Option, Extensions

# Import commands from other modules
from celery.bin.amqp import amqp
from celery.bin.beat import beat
from celery.bin.events import events
from celery.bin.graph import graph
from celery.bin.worker import worker

__all__ = ['CeleryCommand', 'main']

HELP = """
---- -- - - ---- Commands- -------------- --- ------------

{commands}
---- -- - - --------- -- - -------------- --- ------------

Type '{prog_name} <command> --help' for help using a specific command.
"""

MIGRATE_PROGRESS_FMT = """\
Migrating task {state.count}/{state.strtotal}: \
{body[task]}[{body[id]}]\
"""

DEBUG = os.environ.get('C_DEBUG', False)

command_classes = [
('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'),
('Remote Control', ['status', 'inspect', 'control'], 'blue'),
('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None),
]
if DEBUG: # pragma: no cover
command_classes.append(
('Debug', ['graph'], 'red'),
)


def determine_exit_status(ret):
if isinstance(ret, numbers.Integral):
return ret
return EX_OK if ret else EX_FAILURE


def main(argv=None):
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
try:
if __name__ != '__main__': # pragma: no cover
sys.modules['__main__'] = sys.modules[__name__]
cmd = CeleryCommand()
cmd.maybe_patch_concurrency()
from billiard import freeze_support
freeze_support()
cmd.execute_from_commandline(argv)
except KeyboardInterrupt:
pass


class multi(Command):
"""Start multiple worker instances."""
respects_app_option = False

def get_options(self):
return ()

def run_from_argv(self, prog_name, argv, command=None):
from celery.bin.multi import MultiTool
multi = MultiTool(quiet=self.quiet, no_color=self.no_color)
return multi.execute_from_commandline(
[command] + argv, prog_name,
)


class list_(Command):
"""Get info from broker.

Examples::

celery list bindings

NOTE: For RabbitMQ the management plugin is required.
"""
args = '[bindings]'

def list_bindings(self, management):
try:
bindings = management.get_bindings()
except NotImplementedError:
raise self.Error('Your transport cannot list bindings.')

def fmt(q, e, r):
return self.out('{0:<28} {1:<28} {2}'.format(q, e, r))
fmt('Queue', 'Exchange', 'Routing Key')
fmt('-' * 16, '-' * 16, '-' * 16)
for b in bindings:
fmt(b['destination'], b['source'], b['routing_key'])

def run(self, what=None, *_, **kw):
topics = {'bindings': self.list_bindings}
available = ', '.join(topics)
if not what:
raise self.UsageError(
'You must specify one of {0}'.format(available))
if what not in topics:
raise self.UsageError(
'unknown topic {0!r} (choose one of: {1})'.format(
what, available))
with self.app.connection() as conn:
self.app.amqp.TaskConsumer(conn).declare()
topics[what](conn.manager)


class call(Command):
"""Call a task by name.

Examples::

celery call tasks.add --args='[2, 2]'
celery call tasks.add --args='[2, 2]' --countdown=10
"""
args = '<task_name>'
option_list = Command.option_list + (
Option('--args', '-a', help='positional arguments (json).'),
Option('--kwargs', '-k', help='keyword arguments (json).'),
Option('--eta', help='scheduled time (ISO-8601).'),
Option('--countdown', type='float',
help='eta in seconds from now (float/int).'),
Option('--expires', help='expiry time (ISO-8601/float/int).'),
Option('--serializer', default='json', help='defaults to json.'),
Option('--queue', help='custom queue name.'),
Option('--exchange', help='custom exchange name.'),
Option('--routing-key', help='custom routing key.'),
)

def run(self, name, *_, **kw):
# Positional args.
args = kw.get('args') or ()
if isinstance(args, string_t):
args = anyjson.loads(args)

# Keyword args.
kwargs = kw.get('kwargs') or {}
if isinstance(kwargs, string_t):
kwargs = anyjson.loads(kwargs)

# Expires can be int/float.
expires = kw.get('expires') or None
try:
expires = float(expires)
except (TypeError, ValueError):
# or a string describing an ISO 8601 datetime.
try:
expires = maybe_iso8601(expires)
except (TypeError, ValueError):
raise

res = self.app.send_task(name, args=args, kwargs=kwargs,
countdown=kw.get('countdown'),
serializer=kw.get('serializer'),
queue=kw.get('queue'),
exchange=kw.get('exchange'),
routing_key=kw.get('routing_key'),
eta=maybe_iso8601(kw.get('eta')),
expires=expires)
self.out(res.id)


class purge(Command):
"""Erase all messages from all known task queues.

WARNING: There is no undo operation for this command.

"""
warn_prelude = (
'{warning}: This will remove all tasks from {queues}: {names}.\n'
' There is no undo for this operation!\n\n'
'(to skip this prompt use the -f option)\n'
)
warn_prompt = 'Are you sure you want to delete all tasks'
fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.'
fmt_empty = 'No messages purged from {qnum} {queues}'
option_list = Command.option_list + (
Option('--force', '-f', action='store_true',
help='Do not prompt for verification'),
)

def run(self, force=False, **kwargs):
names = list(sorted(self.app.amqp.queues.keys()))
qnum = len(names)
if not force:
self.out(self.warn_prelude.format(
warning=self.colored.red('WARNING'),
queues=text.pluralize(qnum, 'queue'), names=', '.join(names),
))
if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes':
return
messages = self.app.control.purge()
fmt = self.fmt_purged if messages else self.fmt_empty
self.out(fmt.format(
mnum=messages, qnum=qnum,
messages=text.pluralize(messages, 'message'),
queues=text.pluralize(qnum, 'queue')))
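# Illustrative sketch: the command above simply wraps `app.control.purge()`,
# which returns the number of messages removed (assuming `app` is a
# configured Celery instance).
#
#     removed = app.control.purge()
#     print('purged {0} messages'.format(removed))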


class result(Command):
"""Gives the return value for a given task id.

Examples::

celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500
celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add
celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback

"""
args = '<task_id>'
option_list = Command.option_list + (
Option('--task', '-t', help='name of task (if custom backend)'),
Option('--traceback', action='store_true',
help='show traceback instead'),
)

def run(self, task_id, *args, **kwargs):
result_cls = self.app.AsyncResult
task = kwargs.get('task')
traceback = kwargs.get('traceback', False)

if task:
result_cls = self.app.tasks[task].AsyncResult
result = result_cls(task_id)
if traceback:
value = result.traceback
else:
value = result.get()
self.out(self.pretty(value)[1])
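# Illustrative sketch: fetching a result programmatically, mirroring the
# command above (assuming `app` is a configured Celery instance with a
# result backend; the task id is a placeholder).
#
#     result = app.AsyncResult('8f511516-e2f5-4da4-9d2f-0fb83a86e500')
#     print(result.get())       # return value of the task
#     print(result.traceback)   # or the traceback, if the task failed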


class _RemoteControl(Command):
name = None
choices = None
leaf = False
option_list = Command.option_list + (
Option('--timeout', '-t', type='float',
help='Timeout in seconds (float) waiting for reply'),
Option('--destination', '-d',
help='Comma separated list of destination node names.'))

def __init__(self, *args, **kwargs):
self.show_body = kwargs.pop('show_body', True)
self.show_reply = kwargs.pop('show_reply', True)
super(_RemoteControl, self).__init__(*args, **kwargs)

@classmethod
def get_command_info(self, command,
indent=0, prefix='', color=None, help=False):
if help:
help = '|' + text.indent(self.choices[command][1], indent + 4)
else:
help = None
try:
# see if it uses args.
meth = getattr(self, command)
return text.join([
'|' + text.indent('{0}{1} {2}'.format(
prefix, color(command), meth.__doc__), indent),
help,
])

except AttributeError:
return text.join([
'|' + text.indent(prefix + str(color(command)), indent), help,
])

@classmethod
def list_commands(self, indent=0, prefix='', color=None, help=False):
color = color if color else lambda x: x
prefix = prefix + ' ' if prefix else ''
return '\n'.join(self.get_command_info(c, indent, prefix, color, help)
for c in sorted(self.choices))

@property
def epilog(self):
return '\n'.join([
'[Commands]',
self.list_commands(indent=4, help=True)
])

def usage(self, command):
return '%prog {0} [options] {1} <command> [arg1 .. argN]'.format(
command, self.args)

def call(self, *args, **kwargs):
raise NotImplementedError('call')

def run(self, *args, **kwargs):
if not args:
raise self.UsageError(
'Missing {0.name} method. See --help'.format(self))
return self.do_call_method(args, **kwargs)

def do_call_method(self, args, **kwargs):
method = args[0]
if method == 'help':
raise self.Error("Did you mean '{0.name} --help'?".format(self))
if method not in self.choices:
raise self.UsageError(
'Unknown {0.name} method {1}'.format(self, method))

if self.app.connection().transport.driver_type == 'sql':
raise self.Error('Broadcast not supported by SQL broker transport')

destination = kwargs.get('destination')
timeout = kwargs.get('timeout') or self.choices[method][0]
if destination and isinstance(destination, string_t):
destination = [dest.strip() for dest in destination.split(',')]

handler = getattr(self, method, self.call)

replies = handler(method, *args[1:], timeout=timeout,
destination=destination,
callback=self.say_remote_command_reply)
if not replies:
raise self.Error('No nodes replied within time constraint.',
status=EX_UNAVAILABLE)
return replies


class inspect(_RemoteControl):
"""Inspect the worker at runtime.

Availability: RabbitMQ (amqp), Redis, and MongoDB transports.

Examples::

celery inspect active --timeout=5
celery inspect scheduled -d worker1@example.com
celery inspect revoked -d w1@e.com,w2@e.com

"""
name = 'inspect'
choices = {
'active': (1.0, 'dump active tasks (being processed)'),
'active_queues': (1.0, 'dump queues being consumed from'),
'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'),
'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'),
'stats': (1.0, 'dump worker statistics'),
'revoked': (1.0, 'dump of revoked task ids'),
'registered': (1.0, 'dump of registered tasks'),
'ping': (0.2, 'ping worker(s)'),
'clock': (1.0, 'get value of logical clock'),
'conf': (1.0, 'dump worker configuration'),
'report': (1.0, 'get bugreport info'),
'memsample': (1.0, 'sample memory (requires psutil)'),
'memdump': (1.0, 'dump memory samples (requires psutil)'),
'objgraph': (60.0, 'create object graph (requires objgraph)'),
}

def call(self, method, *args, **options):
i = self.app.control.inspect(**options)
return getattr(i, method)(*args)

def objgraph(self, type_='Request', *args, **kwargs):
return self.call('objgraph', type_, **kwargs)

def conf(self, with_defaults=False, *args, **kwargs):
return self.call('conf', with_defaults, **kwargs)
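# Illustrative sketch: the same inspection API is available on a configured
# app, which is what `call()` above delegates to (`app` and the worker name
# are assumptions).
#
#     i = app.control.inspect(timeout=1.0)           # all workers
#     # i = app.control.inspect(['w1@example.com'])   # or specific nodes
#     print(i.ping())
#     print(i.active())
#     print(i.stats())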


class control(_RemoteControl):
"""Workers remote control.

Availability: RabbitMQ (amqp), Redis, and MongoDB transports.

Examples::

celery control enable_events --timeout=5
celery control -d worker1@example.com enable_events
celery control -d w1.e.com,w2.e.com enable_events

celery control -d w1.e.com add_consumer queue_name
celery control -d w1.e.com cancel_consumer queue_name

celery control -d w1.e.com add_consumer queue exchange direct rkey

"""
name = 'control'
choices = {
'enable_events': (1.0, 'tell worker(s) to enable events'),
'disable_events': (1.0, 'tell worker(s) to disable events'),
'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'),
'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'),
'rate_limit': (
1.0, 'tell worker(s) to modify the rate limit for a task type'),
'time_limit': (
1.0, 'tell worker(s) to modify the time limit for a task type.'),
'autoscale': (1.0, 'change autoscale settings'),
'pool_grow': (1.0, 'start more pool processes'),
'pool_shrink': (1.0, 'use less pool processes'),
}

def call(self, method, *args, **options):
return getattr(self.app.control, method)(*args, reply=True, **options)

def pool_grow(self, method, n=1, **kwargs):
"""[N=1]"""
return self.call(method, int(n), **kwargs)

def pool_shrink(self, method, n=1, **kwargs):
"""[N=1]"""
return self.call(method, int(n), **kwargs)

def autoscale(self, method, max=None, min=None, **kwargs):
"""[max] [min]"""
return self.call(method, int(max), int(min), **kwargs)

def rate_limit(self, method, task_name, rate_limit, **kwargs):
"""<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)>"""
return self.call(method, task_name, rate_limit, **kwargs)

def time_limit(self, method, task_name, soft, hard=None, **kwargs):
"""<task_name> <soft_secs> [hard_secs]"""
return self.call(method, task_name,
float(soft), float(hard), **kwargs)

def add_consumer(self, method, queue, exchange=None,
exchange_type='direct', routing_key=None, **kwargs):
"""<queue> [exchange [type [routing_key]]]"""
return self.call(method, queue, exchange,
exchange_type, routing_key, **kwargs)

def cancel_consumer(self, method, queue, **kwargs):
"""<queue>"""
return self.call(method, queue, **kwargs)
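# Illustrative sketch: the handlers above delegate to broadcast methods on
# `app.control`; `reply=True` waits for worker replies (task and queue names
# are placeholders).
#
#     app.control.rate_limit('tasks.add', '10/m', reply=True)
#     app.control.add_consumer('priority', reply=True)
#     app.control.cancel_consumer('priority', reply=True)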


class status(Command):
"""Show list of workers that are online."""
option_list = inspect.option_list

def run(self, *args, **kwargs):
I = inspect(
app=self.app,
no_color=kwargs.get('no_color', False),
stdout=self.stdout, stderr=self.stderr,
show_reply=False, show_body=False, quiet=True,
)
replies = I.run('ping', **kwargs)
if not replies:
raise self.Error('No nodes replied within time constraint',
status=EX_UNAVAILABLE)
nodecount = len(replies)
if not kwargs.get('quiet', False):
self.out('\n{0} {1} online.'.format(
nodecount, text.pluralize(nodecount, 'node')))


class migrate(Command):
"""Migrate tasks from one broker to another.

Examples::

celery migrate redis://localhost amqp://guest@localhost//
celery migrate django:// redis://localhost

NOTE: This command is experimental; make sure you have
a backup of the tasks before you continue.
"""
args = '<source_url> <dest_url>'
option_list = Command.option_list + (
Option('--limit', '-n', type='int',
help='Number of tasks to consume (int)'),
Option('--timeout', '-t', type='float', default=1.0,
help='Timeout in seconds (float) waiting for tasks'),
Option('--ack-messages', '-a', action='store_true',
help='Ack messages from source broker.'),
Option('--tasks', '-T',
help='List of task names to filter on.'),
Option('--queues', '-Q',
help='List of queues to migrate.'),
Option('--forever', '-F', action='store_true',
help='Continually migrate tasks until killed.'),
)
progress_fmt = MIGRATE_PROGRESS_FMT

def on_migrate_task(self, state, body, message):
self.out(self.progress_fmt.format(state=state, body=body))

def run(self, source, destination, **kwargs):
from kombu import Connection
from celery.contrib.migrate import migrate_tasks

migrate_tasks(Connection(source),
Connection(destination),
callback=self.on_migrate_task,
**kwargs)
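# Illustrative sketch: moving messages between brokers directly with the
# helper used above (the broker URLs are placeholders).
#
#     from kombu import Connection
#     from celery.contrib.migrate import migrate_tasks
#
#     migrate_tasks(Connection('redis://localhost'),
#                   Connection('amqp://guest@localhost//'))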


class shell(Command): # pragma: no cover
"""Start shell session with convenient access to celery symbols.

The following symbols will be added to the main globals:

- celery: the current application.
- chord, group, chain, chunks,
xmap, xstarmap, subtask, Task
- all registered tasks.

"""
option_list = Command.option_list + (
Option('--ipython', '-I',
action='store_true', dest='force_ipython',
help='force IPython.'),
Option('--bpython', '-B',
action='store_true', dest='force_bpython',
help='force bpython.'),
Option('--python', '-P',
action='store_true', dest='force_python',
help='force default Python shell.'),
Option('--without-tasks', '-T', action='store_true',
help="don't add tasks to locals."),
Option('--eventlet', action='store_true',
help='use eventlet.'),
Option('--gevent', action='store_true', help='use gevent.'),
)

def run(self, force_ipython=False, force_bpython=False,
force_python=False, without_tasks=False, eventlet=False,
gevent=False, **kwargs):
sys.path.insert(0, os.getcwd())
if eventlet:
import_module('celery.concurrency.eventlet')
if gevent:
import_module('celery.concurrency.gevent')
import celery
import celery.task.base
self.app.loader.import_default_modules()
self.locals = {'app': self.app,
'celery': self.app,
'Task': celery.Task,
'chord': celery.chord,
'group': celery.group,
'chain': celery.chain,
'chunks': celery.chunks,
'xmap': celery.xmap,
'xstarmap': celery.xstarmap,
'subtask': celery.subtask,
'signature': celery.signature}

if not without_tasks:
self.locals.update(dict(
(task.__name__, task) for task in values(self.app.tasks)
if not task.name.startswith('celery.')),
)

if force_python:
return self.invoke_fallback_shell()
elif force_bpython:
return self.invoke_bpython_shell()
elif force_ipython:
return self.invoke_ipython_shell()
return self.invoke_default_shell()

def invoke_default_shell(self):
try:
import IPython # noqa
except ImportError:
try:
import bpython # noqa
except ImportError:
return self.invoke_fallback_shell()
else:
return self.invoke_bpython_shell()
else:
return self.invoke_ipython_shell()

def invoke_fallback_shell(self):
import code
try:
import readline
except ImportError:
pass
else:
import rlcompleter
readline.set_completer(
rlcompleter.Completer(self.locals).complete)
readline.parse_and_bind('tab:complete')
code.interact(local=self.locals)

def invoke_ipython_shell(self):
for ip in (self._ipython, self._ipython_pre_10,
self._ipython_terminal, self._ipython_010,
self._no_ipython):
try:
return ip()
except ImportError:
pass

def _ipython(self):
from IPython import start_ipython
start_ipython(argv=[], user_ns=self.locals)

def _ipython_pre_10(self): # pragma: no cover
from IPython.frontend.terminal.ipapp import TerminalIPythonApp
app = TerminalIPythonApp.instance()
app.initialize(argv=[])
app.shell.user_ns.update(self.locals)
app.start()

def _ipython_terminal(self): # pragma: no cover
from IPython.terminal import embed
embed.TerminalInteractiveShell(user_ns=self.locals).mainloop()

def _ipython_010(self): # pragma: no cover
from IPython.Shell import IPShell
IPShell(argv=[], user_ns=self.locals).mainloop()

def _no_ipython(self): # pragma: no cover
raise ImportError("no suitable ipython found")

def invoke_bpython_shell(self):
import bpython
bpython.embed(self.locals)


class help(Command):
"""Show help screen and exit."""

def usage(self, command):
return '%prog <command> [options] {0.args}'.format(self)

def run(self, *args, **kwargs):
self.parser.print_help()
self.out(HELP.format(
prog_name=self.prog_name,
commands=CeleryCommand.list_commands(colored=self.colored),
))

return EX_USAGE


class report(Command):
"""Shows information useful to include in bugreports."""

def run(self, *args, **kwargs):
self.out(self.app.bugreport())
return EX_OK


class CeleryCommand(Command):
namespace = 'celery'
ext_fmt = '{self.namespace}.commands'
commands = {
'amqp': amqp,
'beat': beat,
'call': call,
'control': control,
'events': events,
'graph': graph,
'help': help,
'inspect': inspect,
'list': list_,
'migrate': migrate,
'multi': multi,
'purge': purge,
'report': report,
'result': result,
'shell': shell,
'status': status,
'worker': worker,

}
enable_config_from_cmdline = True
prog_name = 'celery'

@classmethod
def register_command(cls, fun, name=None):
cls.commands[name or fun.__name__] = fun
return fun

def execute(self, command, argv=None):
try:
cls = self.commands[command]
except KeyError:
cls, argv = self.commands['help'], ['help']
cls = self.commands.get(command) or self.commands['help']
try:
return cls(
app=self.app, on_error=self.on_error,
no_color=self.no_color, quiet=self.quiet,
on_usage_error=partial(self.on_usage_error, command=command),
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
except self.UsageError as exc:
self.on_usage_error(exc)
return exc.status
except self.Error as exc:
self.on_error(exc)
return exc.status

def on_usage_error(self, exc, command=None):
if command:
helps = '{self.prog_name} {command} --help'
else:
helps = '{self.prog_name} --help'
self.error(self.colored.magenta('Error: {0}'.format(exc)))
self.error("""Please try '{0}'""".format(helps.format(
self=self, command=command,
)))

def _relocate_args_from_start(self, argv, index=0):
if argv:
rest = []
while index < len(argv):
value = argv[index]
if value.startswith('--'):
rest.append(value)
elif value.startswith('-'):
# we eat the next argument even though we don't know
# if this option takes an argument or not.
# instead, the command name is determined in the
# return statements below.
try:
nxt = argv[index + 1]
if nxt.startswith('-'):
# is another option
rest.append(value)
else:
# is (maybe) a value for this option
rest.extend([value, nxt])
index += 1
except IndexError:
rest.append(value)
break
else:
break
index += 1
if argv[index:]:
# if there are more arguments left then divide and swap
# we assume the first argument in argv[i:] is the command
# name.
return argv[index:] + rest
# if there are no more arguments then the last arg in 'rest'
# must be the command.
return [rest.pop()] + rest
return []

def prepare_prog_name(self, name):
if name == '__main__.py':
return sys.modules['__main__'].__file__
return name

def handle_argv(self, prog_name, argv):
self.prog_name = self.prepare_prog_name(prog_name)
argv = self._relocate_args_from_start(argv)
_, argv = self.prepare_args(None, argv)
try:
command = argv[0]
except IndexError:
command, argv = 'help', ['help']
return self.execute(command, argv)

def execute_from_commandline(self, argv=None):
argv = sys.argv if argv is None else argv
if 'multi' in argv[1:3]: # Issue 1008
self.respects_app_option = False
try:
sys.exit(determine_exit_status(
super(CeleryCommand, self).execute_from_commandline(argv)))
except KeyboardInterrupt:
sys.exit(EX_FAILURE)

@classmethod
def get_command_info(self, command, indent=0, color=None, colored=None):
colored = term.colored() if colored is None else colored
colored = colored.names[color] if color else lambda x: x
obj = self.commands[command]
cmd = 'celery {0}'.format(colored(command))
if obj.leaf:
return '|' + text.indent(cmd, indent)
return text.join([
' ',
'|' + text.indent('{0} --help'.format(cmd), indent),
obj.list_commands(indent, 'celery {0}'.format(command), colored),
])

@classmethod
def list_commands(self, indent=0, colored=None):
colored = term.colored() if colored is None else colored
white = colored.white
ret = []
for cls, commands, color in command_classes:
ret.extend([
text.indent('+ {0}: '.format(white(cls)), indent),
'\n'.join(
self.get_command_info(command, indent + 4, color, colored)
for command in commands),
''
])
return '\n'.join(ret).strip()

def with_pool_option(self, argv):
if len(argv) > 1 and 'worker' in argv[0:3]:
# this command supports custom pools
# that may have to be loaded as early as possible.
return (['-P'], ['--pool'])

def on_concurrency_setup(self):
self.load_extension_commands()

def load_extension_commands(self):
names = Extensions(self.ext_fmt.format(self=self),
self.register_command).load()
if names:
command_classes.append(('Extensions', names, 'magenta'))


def command(*args, **kwargs):
"""Deprecated: Use classmethod :meth:`CeleryCommand.register_command`
instead."""
_register = CeleryCommand.register_command
return _register(args[0]) if args else _register


if __name__ == '__main__': # pragma: no cover
main()
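The `commands` mapping and `register_command` above are also the hook used for
third-party sub-commands: `load_extension_commands` looks up the
`celery.commands` entry point (see `ext_fmt`) and registers whatever it finds.
A minimal sketch of a custom sub-command, assuming a `Command` subclass from
`celery.bin.base` (the class name and output are illustrative):

    from celery.bin.base import Command

    class hello(Command):
        """Toy command that just prints a greeting."""

        def run(self, *args, **kwargs):
            self.out('hello from a custom sub-command')

    # Direct registration; distributed packages would normally declare a
    # 'celery.commands' setuptools entry point instead.
    CeleryCommand.register_command(hello)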

+ 0
- 181
thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py View File

# -*- coding: utf-8 -*-
"""
celery.bin.celeryd_detach
~~~~~~~~~~~~~~~~~~~~~~~~~

Program used to daemonize the worker

Using :func:`os.execv` because forking and multiprocessing
lead to weird issues (it was a long time ago now, but it
could have something to do with the threading mutex bug)

"""
from __future__ import absolute_import

import celery
import os
import sys

from optparse import OptionParser, BadOptionError

from celery.platforms import EX_FAILURE, detached
from celery.utils import default_nodename, node_format
from celery.utils.log import get_logger

from celery.bin.base import daemon_options, Option

__all__ = ['detached_celeryd', 'detach']

logger = get_logger(__name__)

C_FAKEFORK = os.environ.get('C_FAKEFORK')

OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
Option('--workdir', default=None, dest='working_directory'),
Option('-n', '--hostname'),
Option('--fake',
default=False, action='store_true', dest='fake',
help="Don't fork (for debugging purposes)"),
)


def detach(path, argv, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, working_directory=None, fake=False, app=None,
executable=None, hostname=None):
hostname = default_nodename(hostname)
logfile = node_format(logfile, hostname)
pidfile = node_format(pidfile, hostname)
fake = 1 if C_FAKEFORK else fake
with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
after_forkers=False):
try:
if executable is not None:
path = executable
os.execv(path, [path] + argv)
except Exception:
if app is None:
from celery import current_app
app = current_app
app.log.setup_logging_subsystem(
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
return EX_FAILURE


class PartialOptionParser(OptionParser):

def __init__(self, *args, **kwargs):
self.leftovers = []
OptionParser.__init__(self, *args, **kwargs)

def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)

if '=' in arg:
opt, next_arg = arg.split('=', 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False

try:
opt = self._match_long_opt(opt)
option = self._long_opt.get(opt)
except BadOptionError:
option = None

if option:
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error('{0} requires an argument'.format(opt))
else:
self.error('{0} requires {1} arguments'.format(
opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]

elif had_explicit_value:
self.error('{0} option does not take a value'.format(opt))
else:
value = None
option.process(opt, value, values, self)
else:
self.leftovers.append(arg)

def _process_short_opts(self, rargs, values):
arg = rargs[0]
try:
OptionParser._process_short_opts(self, rargs, values)
except BadOptionError:
self.leftovers.append(arg)
if rargs and not rargs[0][0] == '-':
self.leftovers.append(rargs.pop(0))


class detached_celeryd(object):
option_list = OPTION_LIST
usage = '%prog [options] [celeryd options]'
version = celery.VERSION_BANNER
description = ('Detaches Celery worker nodes. See `celery worker --help` '
'for the list of supported worker arguments.')
command = sys.executable
execv_path = sys.executable
if sys.version_info < (2, 7): # does not support pkg/__main__.py
execv_argv = ['-m', 'celery.__main__', 'worker']
else:
execv_argv = ['-m', 'celery', 'worker']

def __init__(self, app=None):
self.app = app

def Parser(self, prog_name):
return PartialOptionParser(prog=prog_name,
option_list=self.option_list,
usage=self.usage,
description=self.description,
version=self.version)

def parse_options(self, prog_name, argv):
parser = self.Parser(prog_name)
options, values = parser.parse_args(argv)
if options.logfile:
parser.leftovers.append('--logfile={0}'.format(options.logfile))
if options.pidfile:
parser.leftovers.append('--pidfile={0}'.format(options.pidfile))
if options.hostname:
parser.leftovers.append('--hostname={0}'.format(options.hostname))
return options, values, parser.leftovers

def execute_from_commandline(self, argv=None):
if argv is None:
argv = sys.argv
config = []
seen_cargs = 0
for arg in argv:
if seen_cargs:
config.append(arg)
else:
if arg == '--':
seen_cargs = 1
config.append(arg)
prog_name = os.path.basename(argv[0])
options, values, leftovers = self.parse_options(prog_name, argv[1:])
sys.exit(detach(
app=self.app, path=self.execv_path,
argv=self.execv_argv + leftovers + config,
**vars(options)
))


def main(app=None):
detached_celeryd(app).execute_from_commandline()

if __name__ == '__main__': # pragma: no cover
main()
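For reference, a rough sketch of what `celery worker --detach` ends up calling
via the helpers above (actually running it daemonizes and re-execs a worker;
the logfile/pidfile paths are placeholders):

    import sys
    from celery.bin.celeryd_detach import detach

    detach(path=sys.executable,
           argv=['-m', 'celery', 'worker', '-l', 'info'],
           logfile='worker.log', pidfile='worker.pid')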

+ 0
- 139
thesisenv/lib/python3.6/site-packages/celery/bin/events.py View File

# -*- coding: utf-8 -*-
"""

The :program:`celery events` command.

.. program:: celery events

.. seealso::

See :ref:`preload-options` and :ref:`daemon-options`.

.. cmdoption:: -d, --dump

Dump events to stdout.

.. cmdoption:: -c, --camera

Take snapshots of events using this camera.

.. cmdoption:: --detach

Camera: Detach and run in the background as a daemon.

.. cmdoption:: -F, --freq, --frequency

Camera: Shutter frequency. Default is every 1.0 seconds.

.. cmdoption:: -r, --maxrate

Camera: Optional shutter rate limit (e.g. 10/m).

.. cmdoption:: -l, --loglevel

Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`. Default is INFO.

"""
from __future__ import absolute_import, unicode_literals

import sys

from functools import partial

from celery.platforms import detached, set_process_title, strargv
from celery.bin.base import Command, Option, daemon_options

__all__ = ['events']


class events(Command):
"""Event-stream utilities.

Commands::

celery events --app=proj
start graphical monitor (requires curses)
celery events -d --app=proj
dump events to screen.
celery events -b amqp://
celery events -c <camera> [options]
run snapshot camera.

Examples::

celery events
celery events -d
celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info
"""
doc = __doc__
supports_args = False

def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
loglevel='INFO', logfile=None, prog_name='celery events',
pidfile=None, uid=None, gid=None, umask=None,
working_directory=None, detach=False, **kwargs):
self.prog_name = prog_name

if dump:
return self.run_evdump()
if camera:
return self.run_evcam(camera, freq=frequency, maxrate=maxrate,
loglevel=loglevel, logfile=logfile,
pidfile=pidfile, uid=uid, gid=gid,
umask=umask,
working_directory=working_directory,
detach=detach)
return self.run_evtop()

def run_evdump(self):
from celery.events.dumper import evdump
self.set_process_status('dump')
return evdump(app=self.app)

def run_evtop(self):
from celery.events.cursesmon import evtop
self.set_process_status('top')
return evtop(app=self.app)

def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, working_directory=None,
detach=False, **kwargs):
from celery.events.snapshot import evcam
workdir = working_directory
self.set_process_status('cam')
kwargs['app'] = self.app
cam = partial(evcam, camera,
logfile=logfile, pidfile=pidfile, **kwargs)

if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
return cam()
else:
return cam()

def set_process_status(self, prog, info=''):
prog = '{0}:{1}'.format(self.prog_name, prog)
info = '{0} {1}'.format(info, strargv(sys.argv))
return set_process_title(prog, info=info)

def get_options(self):
return (
(Option('-d', '--dump', action='store_true'),
Option('-c', '--camera'),
Option('--detach', action='store_true'),
Option('-F', '--frequency', '--freq',
type='float', default=1.0),
Option('-r', '--maxrate'),
Option('-l', '--loglevel', default='INFO')) +
daemon_options(default_pidfile='celeryev.pid') +
tuple(self.app.user_options['events'])
)


def main():
ev = events()
ev.execute_from_commandline()

if __name__ == '__main__': # pragma: no cover
main()
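The command is a thin wrapper; the dump mode, for example, can be used
directly (assuming `app` is a configured Celery instance):

    from celery.events.dumper import evdump

    evdump(app=app)  # same event stream as `celery events -d`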

+ 0
- 191
thesisenv/lib/python3.6/site-packages/celery/bin/graph.py View File

# -*- coding: utf-8 -*-
"""

The :program:`celery graph` command.

.. program:: celery graph

"""
from __future__ import absolute_import, unicode_literals

from operator import itemgetter

from celery.datastructures import DependencyGraph, GraphFormatter
from celery.five import items

from .base import Command

__all__ = ['graph']


class graph(Command):
args = """<TYPE> [arguments]
..... bootsteps [worker] [consumer]
..... workers [enumerate]
"""

def run(self, what=None, *args, **kwargs):
map = {'bootsteps': self.bootsteps, 'workers': self.workers}
if not what:
raise self.UsageError('missing type')
elif what not in map:
raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map)))
return map[what](*args, **kwargs)

def bootsteps(self, *args, **kwargs):
worker = self.app.WorkController()
include = set(arg.lower() for arg in args or ['worker', 'consumer'])
if 'worker' in include:
graph = worker.blueprint.graph
if 'consumer' in include:
worker.blueprint.connect_with(worker.consumer.blueprint)
else:
graph = worker.consumer.blueprint.graph
graph.to_dot(self.stdout)

def workers(self, *args, **kwargs):

def simplearg(arg):
return maybe_list(itemgetter(0, 2)(arg.partition(':')))

def maybe_list(l, sep=','):
return (l[0], l[1].split(sep) if sep in l[1] else l[1])

args = dict(simplearg(arg) for arg in args)
generic = 'generic' in args

def generic_label(node):
return '{0} ({1}://)'.format(type(node).__name__,
node._label.split('://')[0])

class Node(object):
force_label = None
scheme = {}

def __init__(self, label, pos=None):
self._label = label
self.pos = pos

def label(self):
return self._label

def __str__(self):
return self.label()

class Thread(Node):
scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow',
'shape': 'oval', 'fontsize': 10, 'width': 0.3,
'color': 'black'}

def __init__(self, label, **kwargs):
self._label = 'thr-{0}'.format(next(tids))
self.real_label = label
self.pos = 0

class Formatter(GraphFormatter):

def label(self, obj):
return obj and obj.label()

def node(self, obj):
scheme = dict(obj.scheme) if obj.pos else obj.scheme
if isinstance(obj, Thread):
scheme['label'] = obj.real_label
return self.draw_node(
obj, dict(self.node_scheme, **scheme),
)

def terminal_node(self, obj):
return self.draw_node(
obj, dict(self.term_scheme, **obj.scheme),
)

def edge(self, a, b, **attrs):
if isinstance(a, Thread):
attrs.update(arrowhead='none', arrowtail='tee')
return self.draw_edge(a, b, self.edge_scheme, attrs)

def subscript(n):
S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄',
'5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'}
return ''.join([S[i] for i in str(n)])

class Worker(Node):
pass

class Backend(Node):
scheme = {'shape': 'folder', 'width': 2,
'height': 1, 'color': 'black',
'fillcolor': 'peachpuff3', 'color': 'peachpuff4'}

def label(self):
return generic_label(self) if generic else self._label

class Broker(Node):
scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3',
'color': 'cadetblue4', 'height': 1}

def label(self):
return generic_label(self) if generic else self._label

from itertools import count
tids = count(1)
Wmax = int(args.get('wmax', 4) or 0)
Tmax = int(args.get('tmax', 3) or 0)

def maybe_abbr(l, name, max=Wmax):
size = len(l)
abbr = max and size > max
if 'enumerate' in args:
l = ['{0}{1}'.format(name, subscript(i + 1))
for i, obj in enumerate(l)]
if abbr:
l = l[0:max - 1] + [l[size - 1]]
l[max - 2] = '{0}⎨…{1}⎬'.format(
name[0], subscript(size - (max - 1)))
return l

try:
workers = args['nodes']
threads = args.get('threads') or []
except KeyError:
replies = self.app.control.inspect().stats()
workers, threads = [], []
for worker, reply in items(replies):
workers.append(worker)
threads.append(reply['pool']['max-concurrency'])

wlen = len(workers)
backend = args.get('backend', self.app.conf.CELERY_RESULT_BACKEND)
threads_for = {}
workers = maybe_abbr(workers, 'Worker')
if Wmax and wlen > Wmax:
threads = threads[0:3] + [threads[-1]]
for i, threads in enumerate(threads):
threads_for[workers[i]] = maybe_abbr(
list(range(int(threads))), 'P', Tmax,
)

broker = Broker(args.get('broker', self.app.connection().as_uri()))
backend = Backend(backend) if backend else None
graph = DependencyGraph(formatter=Formatter())
graph.add_arc(broker)
if backend:
graph.add_arc(backend)
curworker = [0]
for i, worker in enumerate(workers):
worker = Worker(worker, pos=i)
graph.add_arc(worker)
graph.add_edge(worker, broker)
if backend:
graph.add_edge(worker, backend)
threads = threads_for.get(worker._label)
if threads:
for thread in threads:
thread = Thread(thread)
graph.add_arc(thread)
graph.add_edge(thread, worker)

curworker[0] += 1

graph.to_dot(self.stdout)

+ 0
- 646
thesisenv/lib/python3.6/site-packages/celery/bin/multi.py View File

# -*- coding: utf-8 -*-
"""

.. program:: celery multi

Examples
========

.. code-block:: bash

# Single worker with explicit name and events enabled.
$ celery multi start Leslie -E

# Pidfiles and logfiles are stored in the current directory
# by default. Use the --pidfile and --logfile arguments to change
# this. The abbreviation %N will be expanded to the current
# node name.
$ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid
--logfile=/var/log/celery/%N.log


# You need to add the same arguments when you restart,
# as these are not persisted anywhere.
$ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid
--logfile=/var/run/celery/%N.log

# To stop the node, you need to specify the same pidfile.
$ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid

# 3 workers, with 3 processes each
$ celery multi start 3 -c 3
celery worker -n celery1@myhost -c 3
celery worker -n celery2@myhost -c 3
celery worker -n celery3@myhost -c 3

# start 3 named workers
$ celery multi start image video data -c 3
celery worker -n image@myhost -c 3
celery worker -n video@myhost -c 3
celery worker -n data@myhost -c 3

# specify custom hostname
$ celery multi start 2 --hostname=worker.example.com -c 3
celery worker -n celery1@worker.example.com -c 3
celery worker -n celery2@worker.example.com -c 3

# specify fully qualified nodenames
$ celery multi start foo@worker.example.com bar@worker.example.com -c 3

# Advanced example starting 10 workers in the background:
# * Three of the workers process the images and video queue
# * Two of the workers process the data queue with loglevel DEBUG
# * the rest processes the 'default' queue.
$ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
-Q default -L:4,5 DEBUG

# You can show the commands necessary to start the workers with
# the 'show' command:
$ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
-Q default -L:4,5 DEBUG

# Additional options are added to each celery worker command,
# but you can also modify the options for ranges of, or specific workers

# 3 workers: Two with 3 processes, and one with 10 processes.
$ celery multi start 3 -c 3 -c:1 10
celery worker -n celery1@myhost -c 10
celery worker -n celery2@myhost -c 3
celery worker -n celery3@myhost -c 3

# can also specify options for named workers
$ celery multi start image video data -c 3 -c:image 10
celery worker -n image@myhost -c 10
celery worker -n video@myhost -c 3
celery worker -n data@myhost -c 3

# ranges and lists of workers in options are also allowed:
# (-c:1-3 can also be written as -c:1,2,3)
$ celery multi start 5 -c 3 -c:1-3 10
celery worker -n celery1@myhost -c 10
celery worker -n celery2@myhost -c 10
celery worker -n celery3@myhost -c 10
celery worker -n celery4@myhost -c 3
celery worker -n celery5@myhost -c 3

# lists also work with named workers
$ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
celery worker -n foo@myhost -c 10
celery worker -n bar@myhost -c 10
celery worker -n baz@myhost -c 10
celery worker -n xuzzy@myhost -c 3

"""
from __future__ import absolute_import, print_function, unicode_literals

import errno
import os
import shlex
import signal
import socket
import sys

from collections import defaultdict, namedtuple
from subprocess import Popen
from time import sleep

from kombu.utils import cached_property
from kombu.utils.compat import OrderedDict
from kombu.utils.encoding import from_utf8

from celery import VERSION_BANNER
from celery.five import items
from celery.platforms import Pidfile, IS_WINDOWS
from celery.utils import term, nodesplit
from celery.utils.text import pluralize

__all__ = ['MultiTool']

SIGNAMES = set(sig for sig in dir(signal)
if sig.startswith('SIG') and '_' not in sig)
SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)

USAGE = """\
usage: {prog_name} start <node1 node2 nodeN|range> [worker options]
{prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)]
{prog_name} stopwait <n1 n2 nN|range> [-SIG (default: -TERM)]
{prog_name} restart <n1 n2 nN|range> [-SIG] [worker options]
{prog_name} kill <n1 n2 nN|range>

{prog_name} show <n1 n2 nN|range> [worker options]
{prog_name} get hostname <n1 n2 nN|range> [-qv] [worker options]
{prog_name} names <n1 n2 nN|range>
{prog_name} expand template <n1 n2 nN|range>
{prog_name} help

additional options (must appear after command name):

* --nosplash: Don't display program info.
* --quiet: Don't show as much output.
* --verbose: Show more output.
* --no-color: Don't display colors.
"""

multi_args_t = namedtuple(
'multi_args_t', ('name', 'argv', 'expander', 'namespace'),
)


def main():
sys.exit(MultiTool().execute_from_commandline(sys.argv))


CELERY_EXE = 'celery'
if sys.version_info < (2, 7):
# pkg.__main__ first supported in Py2.7
CELERY_EXE = 'celery.__main__'


def celery_exe(*args):
return ' '.join((CELERY_EXE, ) + args)


class MultiTool(object):
retcode = 0 # Final exit code.

def __init__(self, env=None, fh=None, quiet=False, verbose=False,
no_color=False, nosplash=False, stdout=None, stderr=None):
"""fh is an old alias to stdout."""
self.stdout = self.fh = stdout or fh or sys.stdout
self.stderr = stderr or sys.stderr
self.env = env
self.nosplash = nosplash
self.quiet = quiet
self.verbose = verbose
self.no_color = no_color
self.prog_name = 'celery multi'
self.commands = {'start': self.start,
'show': self.show,
'stop': self.stop,
'stopwait': self.stopwait,
'stop_verify': self.stopwait, # compat alias
'restart': self.restart,
'kill': self.kill,
'names': self.names,
'expand': self.expand,
'get': self.get,
'help': self.help}

def execute_from_commandline(self, argv, cmd='celery worker'):
argv = list(argv) # don't modify callers argv.

# Reserve the --nosplash|--quiet|-q/--verbose options.
if '--nosplash' in argv:
self.nosplash = argv.pop(argv.index('--nosplash'))
if '--quiet' in argv:
self.quiet = argv.pop(argv.index('--quiet'))
if '-q' in argv:
self.quiet = argv.pop(argv.index('-q'))
if '--verbose' in argv:
self.verbose = argv.pop(argv.index('--verbose'))
if '--no-color' in argv:
self.no_color = argv.pop(argv.index('--no-color'))

self.prog_name = os.path.basename(argv.pop(0))
if not argv or argv[0][0] == '-':
return self.error()

try:
self.commands[argv[0]](argv[1:], cmd)
except KeyError:
self.error('Invalid command: {0}'.format(argv[0]))

return self.retcode

def say(self, m, newline=True, file=None):
print(m, file=file or self.stdout, end='\n' if newline else '')

def carp(self, m, newline=True, file=None):
return self.say(m, newline, file or self.stderr)

def names(self, argv, cmd):
p = NamespacedOptionParser(argv)
self.say('\n'.join(
n.name for n in multi_args(p, cmd)),
)

def get(self, argv, cmd):
wanted = argv[0]
p = NamespacedOptionParser(argv[1:])
for node in multi_args(p, cmd):
if node.name == wanted:
self.say(' '.join(node.argv))
return

def show(self, argv, cmd):
p = NamespacedOptionParser(argv)
self.with_detacher_default_options(p)
self.say('\n'.join(
' '.join([sys.executable] + n.argv) for n in multi_args(p, cmd)),
)

def start(self, argv, cmd):
self.splash()
p = NamespacedOptionParser(argv)
self.with_detacher_default_options(p)
retcodes = []
self.note('> Starting nodes...')
for node in multi_args(p, cmd):
self.note('\t> {0}: '.format(node.name), newline=False)
retcode = self.waitexec(node.argv, path=p.options['--executable'])
self.note(retcode and self.FAILED or self.OK)
retcodes.append(retcode)
self.retcode = int(any(retcodes))

def with_detacher_default_options(self, p):
_setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
_setdefaultopt(p.options, ['--logfile', '-f'], '%N.log')
p.options.setdefault(
'--cmd',
'-m {0}'.format(celery_exe('worker', '--detach')),
)
_setdefaultopt(p.options, ['--executable'], sys.executable)

def signal_node(self, nodename, pid, sig):
try:
os.kill(pid, sig)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
self.note('Could not signal {0} ({1}): No such process'.format(
nodename, pid))
return False
return True

def node_alive(self, pid):
try:
os.kill(pid, 0)
except OSError as exc:
if exc.errno == errno.ESRCH:
return False
raise
return True

def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None,
callback=None):
if not nodes:
return
P = set(nodes)

def on_down(node):
P.discard(node)
if callback:
callback(*node)

self.note(self.colored.blue('> Stopping nodes...'))
for node in list(P):
if node in P:
nodename, _, pid = node
self.note('\t> {0}: {1} -> {2}'.format(
nodename, SIGMAP[sig][3:], pid))
if not self.signal_node(nodename, pid, sig):
on_down(node)

def note_waiting():
left = len(P)
if left:
pids = ', '.join(str(pid) for _, _, pid in P)
self.note(self.colored.blue(
'> Waiting for {0} {1} -> {2}...'.format(
left, pluralize(left, 'node'), pids)), newline=False)

if retry:
note_waiting()
its = 0
while P:
for node in P:
its += 1
self.note('.', newline=False)
nodename, _, pid = node
if not self.node_alive(pid):
self.note('\n\t> {0}: {1}'.format(nodename, self.OK))
on_down(node)
note_waiting()
break
if P and not its % len(P):
sleep(float(retry))
self.note('')

def getpids(self, p, cmd, callback=None):
_setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')

nodes = []
for node in multi_args(p, cmd):
try:
pidfile_template = _getopt(
p.namespaces[node.namespace], ['--pidfile', '-p'],
)
except KeyError:
pidfile_template = _getopt(p.options, ['--pidfile', '-p'])
pid = None
pidfile = node.expander(pidfile_template)
try:
pid = Pidfile(pidfile).read_pid()
except ValueError:
pass
if pid:
nodes.append((node.name, tuple(node.argv), pid))
else:
self.note('> {0.name}: {1}'.format(node, self.DOWN))
if callback:
callback(node.name, node.argv, pid)

return nodes

def kill(self, argv, cmd):
self.splash()
p = NamespacedOptionParser(argv)
for nodename, _, pid in self.getpids(p, cmd):
self.note('Killing node {0} ({1})'.format(nodename, pid))
self.signal_node(nodename, pid, signal.SIGKILL)

def stop(self, argv, cmd, retry=None, callback=None):
self.splash()
p = NamespacedOptionParser(argv)
return self._stop_nodes(p, cmd, retry=retry, callback=callback)

def _stop_nodes(self, p, cmd, retry=None, callback=None):
restargs = p.args[len(p.values):]
self.shutdown_nodes(self.getpids(p, cmd, callback=callback),
sig=findsig(restargs),
retry=retry,
callback=callback)

def restart(self, argv, cmd):
self.splash()
p = NamespacedOptionParser(argv)
self.with_detacher_default_options(p)
retvals = []

def on_node_shutdown(nodename, argv, pid):
self.note(self.colored.blue(
'> Restarting node {0}: '.format(nodename)), newline=False)
retval = self.waitexec(argv, path=p.options['--executable'])
self.note(retval and self.FAILED or self.OK)
retvals.append(retval)

self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown)
self.retval = int(any(retvals))

def stopwait(self, argv, cmd):
self.splash()
p = NamespacedOptionParser(argv)
self.with_detacher_default_options(p)
return self._stop_nodes(p, cmd, retry=2)
stop_verify = stopwait # compat

def expand(self, argv, cmd=None):
template = argv[0]
p = NamespacedOptionParser(argv[1:])
for node in multi_args(p, cmd):
self.say(node.expander(template))

def help(self, argv, cmd=None):
self.say(__doc__)

def usage(self):
self.splash()
self.say(USAGE.format(prog_name=self.prog_name))

def splash(self):
if not self.nosplash:
c = self.colored
self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER)))

def waitexec(self, argv, path=sys.executable):
args = ' '.join([path] + list(argv))
argstr = shlex.split(from_utf8(args), posix=not IS_WINDOWS)
pipe = Popen(argstr, env=self.env)
self.info(' {0}'.format(' '.join(argstr)))
retcode = pipe.wait()
if retcode < 0:
self.note('* Child was terminated by signal {0}'.format(-retcode))
return -retcode
elif retcode > 0:
self.note('* Child terminated with errorcode {0}'.format(retcode))
return retcode

def error(self, msg=None):
if msg:
self.carp(msg)
self.usage()
self.retcode = 1
return 1

def info(self, msg, newline=True):
if self.verbose:
self.note(msg, newline=newline)

def note(self, msg, newline=True):
if not self.quiet:
self.say(str(msg), newline=newline)

@cached_property
def colored(self):
return term.colored(enabled=not self.no_color)

@cached_property
def OK(self):
return str(self.colored.green('OK'))

@cached_property
def FAILED(self):
return str(self.colored.red('FAILED'))

@cached_property
def DOWN(self):
return str(self.colored.magenta('DOWN'))


def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
names = p.values
options = dict(p.options)
passthrough = p.passthrough
ranges = len(names) == 1
if ranges:
try:
noderange = int(names[0])
except ValueError:
pass
else:
names = [str(n) for n in range(1, noderange + 1)]
prefix = 'celery'
cmd = options.pop('--cmd', cmd)
append = options.pop('--append', append)
hostname = options.pop('--hostname',
options.pop('-n', socket.gethostname()))
prefix = options.pop('--prefix', prefix) or ''
suffix = options.pop('--suffix', suffix) or hostname
if suffix in ('""', "''"):
suffix = ''

for ns_name, ns_opts in list(items(p.namespaces)):
if ',' in ns_name or (ranges and '-' in ns_name):
for subns in parse_ns_range(ns_name, ranges):
p.namespaces[subns].update(ns_opts)
p.namespaces.pop(ns_name)

# Numbers in args always refer to the index in the list of names.
# (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on).
for ns_name, ns_opts in list(items(p.namespaces)):
if ns_name.isdigit():
ns_index = int(ns_name) - 1
if ns_index < 0:
raise KeyError('Indexes start at 1 got: %r' % (ns_name, ))
try:
p.namespaces[names[ns_index]].update(ns_opts)
except IndexError:
raise KeyError('No node at index %r' % (ns_name, ))

for name in names:
this_suffix = suffix
if '@' in name:
this_name = options['-n'] = name
nodename, this_suffix = nodesplit(name)
name = nodename
else:
nodename = '%s%s' % (prefix, name)
this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
expand = abbreviations({'%h': this_name,
'%n': name,
'%N': nodename,
'%d': this_suffix})
argv = ([expand(cmd)] +
[format_opt(opt, expand(value))
for opt, value in items(p.optmerge(name, options))] +
[passthrough])
if append:
argv.append(expand(append))
yield multi_args_t(this_name, argv, expand, name)


class NamespacedOptionParser(object):

def __init__(self, args):
self.args = args
self.options = OrderedDict()
self.values = []
self.passthrough = ''
self.namespaces = defaultdict(lambda: OrderedDict())

self.parse()

def parse(self):
rargs = list(self.args)
pos = 0
while pos < len(rargs):
arg = rargs[pos]
if arg == '--':
self.passthrough = ' '.join(rargs[pos:])
break
elif arg[0] == '-':
if arg[1] == '-':
self.process_long_opt(arg[2:])
else:
value = None
if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
value = rargs[pos + 1]
pos += 1
self.process_short_opt(arg[1:], value)
else:
self.values.append(arg)
pos += 1

def process_long_opt(self, arg, value=None):
if '=' in arg:
arg, value = arg.split('=', 1)
self.add_option(arg, value, short=False)

def process_short_opt(self, arg, value=None):
self.add_option(arg, value, short=True)

def optmerge(self, ns, defaults=None):
if defaults is None:
defaults = self.options
return OrderedDict(defaults, **self.namespaces[ns])

def add_option(self, name, value, short=False, ns=None):
prefix = short and '-' or '--'
dest = self.options
if ':' in name:
name, ns = name.split(':')
dest = self.namespaces[ns]
dest[prefix + name] = value


def quote(v):
return "\\'".join("'" + p + "'" for p in v.split("'"))


def format_opt(opt, value):
if not value:
return opt
if opt.startswith('--'):
return '{0}={1}'.format(opt, value)
return '{0} {1}'.format(opt, value)


def parse_ns_range(ns, ranges=False):
ret = []
for space in ',' in ns and ns.split(',') or [ns]:
if ranges and '-' in space:
start, stop = space.split('-')
ret.extend(
str(n) for n in range(int(start), int(stop) + 1)
)
else:
ret.append(space)
return ret


def abbreviations(mapping):

def expand(S):
ret = S
if S is not None:
for short_opt, long_opt in items(mapping):
ret = ret.replace(short_opt, long_opt)
return ret

return expand


def findsig(args, default=signal.SIGTERM):
for arg in reversed(args):
if len(arg) == 2 and arg[0] == '-':
try:
return int(arg[1])
except ValueError:
pass
if arg[0] == '-':
maybe_sig = 'SIG' + arg[1:]
if maybe_sig in SIGNAMES:
return getattr(signal, maybe_sig)
return default


def _getopt(d, alt):
for opt in alt:
try:
return d[opt]
except KeyError:
pass
raise KeyError(alt[0])


def _setdefaultopt(d, alt, value):
for opt in alt[1:]:
try:
return d[opt]
except KeyError:
pass
return d.setdefault(alt[0], value)


if __name__ == '__main__': # pragma: no cover
main()
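A quick way to see how `NamespacedOptionParser` and `multi_args` expand node
names without starting anything (output depends on the local hostname):

    from celery.bin.multi import NamespacedOptionParser, multi_args

    p = NamespacedOptionParser(['3', '-c', '3', '-c:1', '10'])
    for node in multi_args(p, cmd='celery worker'):
        print(node.name, ' '.join(node.argv))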

+ 0
- 270
thesisenv/lib/python3.6/site-packages/celery/bin/worker.py View File

# -*- coding: utf-8 -*-
"""

The :program:`celery worker` command (previously known as ``celeryd``)

.. program:: celery worker

.. seealso::

See :ref:`preload-options`.

.. cmdoption:: -c, --concurrency

Number of child processes processing the queue. The default
is the number of CPUs available on your system.

.. cmdoption:: -P, --pool

Pool implementation:

prefork (default), eventlet, gevent, solo or threads.

.. cmdoption:: -f, --logfile

Path to log file. If no logfile is specified, `stderr` is used.

.. cmdoption:: -l, --loglevel

Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.

.. cmdoption:: -n, --hostname

Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
%n (name) and %d (domain).

.. cmdoption:: -B, --beat

Also run the `celery beat` periodic task scheduler. Please note that
there must only be one instance of this service.

.. cmdoption:: -Q, --queues

List of queues to enable for this worker, separated by comma.
By default all configured queues are enabled.
Example: `-Q video,image`

.. cmdoption:: -I, --include

Comma separated list of additional modules to import.
Example: -I foo.tasks,bar.tasks

.. cmdoption:: -s, --schedule

Path to the schedule database if running with the `-B` option.
Defaults to `celerybeat-schedule`. The extension ".db" may be
appended to the filename.

.. cmdoption:: -O

Apply optimization profile. Supported: default, fair

.. cmdoption:: --scheduler

Scheduler class to use. Default is celery.beat.PersistentScheduler

.. cmdoption:: -S, --statedb

Path to the state database. The extension '.db' may
be appended to the filename. Default: {default}

.. cmdoption:: -E, --events

Send events that can be captured by monitors like :program:`celery events`,
`celerymon`, and others.

.. cmdoption:: --without-gossip

Do not subscribe to other workers events.

.. cmdoption:: --without-mingle

Do not synchronize with other workers at startup.

.. cmdoption:: --without-heartbeat

Do not send event heartbeats.

.. cmdoption:: --heartbeat-interval

Interval in seconds at which to send worker heartbeat

.. cmdoption:: --purge

Purges all waiting tasks before the daemon is started.
**WARNING**: This is unrecoverable, and the tasks will be
deleted from the messaging server.

.. cmdoption:: --time-limit

Enables a hard time limit (in seconds int/float) for tasks.

.. cmdoption:: --soft-time-limit

Enables a soft time limit (in seconds int/float) for tasks.

.. cmdoption:: --maxtasksperchild

Maximum number of tasks a pool worker can execute before it's
terminated and replaced by a new worker.

.. cmdoption:: --pidfile

Optional file used to store the worker's pid.

The worker will not start if this file already exists
and the pid is still alive.

.. cmdoption:: --autoscale

Enable autoscaling by providing
max_concurrency, min_concurrency. Example::

--autoscale=10,3

(always keep 3 processes, but grow to 10 if necessary)

.. cmdoption:: --autoreload

Enable autoreloading.

.. cmdoption:: --no-execv

Don't do execv after multiprocessing child fork.

"""
from __future__ import absolute_import, unicode_literals

import sys

from celery import concurrency
from celery.bin.base import Command, Option, daemon_options
from celery.bin.celeryd_detach import detached_celeryd
from celery.five import string_t
from celery.platforms import maybe_drop_privileges
from celery.utils import default_nodename
from celery.utils.log import LOG_LEVELS, mlevel

__all__ = ['worker', 'main']

__MODULE_DOC__ = __doc__


class worker(Command):
"""Start worker instance.

Examples::

celery worker --app=proj -l info
celery worker -A proj -l info -Q hipri,lopri

celery worker -A proj --concurrency=4
celery worker -A proj --concurrency=1000 -P eventlet

celery worker --autoscale=10,0
"""
doc = __MODULE_DOC__ # parse help from this too
namespace = 'celeryd'
enable_config_from_cmdline = True
supports_args = False

def run_from_argv(self, prog_name, argv=None, command=None):
command = sys.argv[0] if command is None else command
argv = sys.argv[1:] if argv is None else argv
# parse options before detaching so errors can be handled.
options, args = self.prepare_args(
*self.parse_options(prog_name, argv, command))
self.maybe_detach([command] + argv)
return self(*args, **options)

def maybe_detach(self, argv, dopts=['-D', '--detach']):
if any(arg in argv for arg in dopts):
argv = [v for v in argv if v not in dopts]
# will never return
detached_celeryd(self.app).execute_from_commandline(argv)
raise SystemExit(0)

def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, state_db=None,
**kwargs):
maybe_drop_privileges(uid=uid, gid=gid)
# Pools like eventlet/gevent need to patch libs as early
# as possible.
pool_cls = (concurrency.get_implementation(pool_cls) or
self.app.conf.CELERYD_POOL)
if self.app.IS_WINDOWS and kwargs.get('beat'):
self.die('-B option does not work on Windows. '
'Please run celery beat as a separate service.')
hostname = self.host_format(default_nodename(hostname))
if loglevel:
try:
loglevel = mlevel(loglevel)
except KeyError: # pragma: no cover
self.die('Unknown level {0!r}. Please use one of {1}.'.format(
loglevel, '|'.join(
l for l in LOG_LEVELS if isinstance(l, string_t))))

return self.app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=logfile, # node format handled by celery.app.log.setup
pidfile=self.node_format(pidfile, hostname),
state_db=self.node_format(state_db, hostname), **kwargs
).start()

def with_pool_option(self, argv):
# this command supports custom pools
# that may have to be loaded as early as possible.
return (['-P'], ['--pool'])

def get_options(self):
conf = self.app.conf
return (
Option('-c', '--concurrency',
default=conf.CELERYD_CONCURRENCY, type='int'),
Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
Option('--purge', '--discard', default=False, action='store_true'),
Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
Option('-n', '--hostname'),
Option('-B', '--beat', action='store_true'),
Option('-s', '--schedule', dest='schedule_filename',
default=conf.CELERYBEAT_SCHEDULE_FILENAME),
Option('--scheduler', dest='scheduler_cls'),
Option('-S', '--statedb',
default=conf.CELERYD_STATE_DB, dest='state_db'),
Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
action='store_true', dest='send_events'),
Option('--time-limit', type='float', dest='task_time_limit',
default=conf.CELERYD_TASK_TIME_LIMIT),
Option('--soft-time-limit', dest='task_soft_time_limit',
default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
Option('--maxtasksperchild', dest='max_tasks_per_child',
default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
Option('--queues', '-Q', default=[]),
Option('--exclude-queues', '-X', default=[]),
Option('--include', '-I', default=[]),
Option('--autoscale'),
Option('--autoreload', action='store_true'),
Option('--no-execv', action='store_true', default=False),
Option('--without-gossip', action='store_true', default=False),
Option('--without-mingle', action='store_true', default=False),
Option('--without-heartbeat', action='store_true', default=False),
Option('--heartbeat-interval', type='int'),
Option('-O', dest='optimization'),
Option('-D', '--detach', action='store_true'),
) + daemon_options() + tuple(self.app.user_options['worker'])


def main(app=None):
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != '__main__': # pragma: no cover
sys.modules['__main__'] = sys.modules[__name__]
from billiard import freeze_support
freeze_support()
worker(app=app).execute_from_commandline()


if __name__ == '__main__': # pragma: no cover
main()
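Programmatic equivalent of the command line, assuming a configured app (the
project name and broker URL are placeholders); this goes through the same
`worker` command defined above:

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')
    # roughly the same as `celery worker -A proj --loglevel=INFO -c 4`
    app.worker_main(['worker', '--loglevel=INFO', '--concurrency=4'])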

+ 0
- 422
thesisenv/lib/python3.6/site-packages/celery/bootsteps.py View File

# -*- coding: utf-8 -*-
"""
celery.bootsteps
~~~~~~~~~~~~~~~~

A directed acyclic graph of reusable components.

"""
from __future__ import absolute_import, unicode_literals

from collections import deque
from threading import Event

from kombu.common import ignore_errors
from kombu.utils import symbol_by_name

from .datastructures import DependencyGraph, GraphFormatter
from .five import values, with_metaclass
from .utils.imports import instantiate, qualname
from .utils.log import get_logger

try:
from greenlet import GreenletExit
IGNORE_ERRORS = (GreenletExit, )
except ImportError: # pragma: no cover
IGNORE_ERRORS = ()

__all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep']
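# Illustrative sketch (not part of this module): a custom bootstep attached
# to the worker blueprint via `app.steps['worker'].add(...)`; the step and
# app names are placeholders.
#
#     from celery import Celery, bootsteps
#
#     class ExampleStep(bootsteps.StartStopStep):
#
#         def start(self, worker):
#             print('worker is starting')
#
#         def stop(self, worker):
#             print('worker is stopping')
#
#     app = Celery('proj')
#     app.steps['worker'].add(ExampleStep)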

#: States
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3

logger = get_logger(__name__)
debug = logger.debug


def _pre(ns, fmt):
return '| {0}: {1}'.format(ns.alias, fmt)


def _label(s):
return s.name.rsplit('.', 1)[-1]


class StepFormatter(GraphFormatter):
"""Graph formatter for :class:`Blueprint`."""

blueprint_prefix = '⧉'
conditional_prefix = '∘'
blueprint_scheme = {
'shape': 'parallelogram',
'color': 'slategray4',
'fillcolor': 'slategray3',
}

def label(self, step):
return step and '{0}{1}'.format(
self._get_prefix(step),
(step.label or _label(step)).encode('utf-8', 'ignore'),
)

def _get_prefix(self, step):
if step.last:
return self.blueprint_prefix
if step.conditional:
return self.conditional_prefix
return ''

def node(self, obj, **attrs):
scheme = self.blueprint_scheme if obj.last else self.node_scheme
return self.draw_node(obj, scheme, attrs)

def edge(self, a, b, **attrs):
if a.last:
attrs.update(arrowhead='none', color='darkseagreen3')
return self.draw_edge(a, b, self.edge_scheme, attrs)


class Blueprint(object):
"""Blueprint containing bootsteps that can be applied to objects.

:keyword steps: List of steps.
:keyword name: Set explicit name for this blueprint.
:keyword app: Set the Celery app for this blueprint.
:keyword on_start: Optional callback applied after blueprint start.
:keyword on_close: Optional callback applied before blueprint close.
:keyword on_stopped: Optional callback applied after blueprint stopped.

"""
GraphFormatter = StepFormatter

name = None
state = None
started = 0
default_steps = set()
state_to_name = {
0: 'initializing',
RUN: 'running',
CLOSE: 'closing',
TERMINATE: 'terminating',
}

def __init__(self, steps=None, name=None, app=None,
on_start=None, on_close=None, on_stopped=None):
self.app = app
self.name = name or self.name or qualname(type(self))
self.types = set(steps or []) | set(self.default_steps)
self.on_start = on_start
self.on_close = on_close
self.on_stopped = on_stopped
self.shutdown_complete = Event()
self.steps = {}

def start(self, parent):
self.state = RUN
if self.on_start:
self.on_start()
for i, step in enumerate(s for s in parent.steps if s is not None):
self._debug('Starting %s', step.alias)
self.started = i + 1
step.start(parent)
debug('^-- substep ok')

def human_state(self):
return self.state_to_name[self.state or 0]

def info(self, parent):
info = {}
for step in parent.steps:
info.update(step.info(parent) or {})
return info

def close(self, parent):
if self.on_close:
self.on_close()
self.send_all(parent, 'close', 'closing', reverse=False)

def restart(self, parent, method='stop',
description='restarting', propagate=False):
self.send_all(parent, method, description, propagate=propagate)

def send_all(self, parent, method,
description=None, reverse=True, propagate=True, args=()):
description = description or method.replace('_', ' ')
steps = reversed(parent.steps) if reverse else parent.steps
for step in steps:
if step:
fun = getattr(step, method, None)
if fun is not None:
self._debug('%s %s...',
description.capitalize(), step.alias)
try:
fun(parent, *args)
except Exception as exc:
if propagate:
raise
logger.error(
'Error on %s %s: %r',
description, step.alias, exc, exc_info=1,
)

def stop(self, parent, close=True, terminate=False):
what = 'terminating' if terminate else 'stopping'
if self.state in (CLOSE, TERMINATE):
return

if self.state != RUN or self.started != len(parent.steps):
# Not fully started, can safely exit.
self.state = TERMINATE
self.shutdown_complete.set()
return
self.close(parent)
self.state = CLOSE

self.restart(
parent, 'terminate' if terminate else 'stop',
description=what, propagate=False,
)

if self.on_stopped:
self.on_stopped()
self.state = TERMINATE
self.shutdown_complete.set()

def join(self, timeout=None):
try:
# Will only get here if running green,
# makes sure all greenthreads have exited.
self.shutdown_complete.wait(timeout=timeout)
except IGNORE_ERRORS:
pass

def apply(self, parent, **kwargs):
"""Apply the steps in this blueprint to an object.

This will apply the ``__init__`` and ``include`` methods
of each step, with the object as argument::

step = Step(obj)
...
step.include(obj)

For :class:`StartStopStep` the services created
        will also be added to the object's ``steps`` attribute.

"""
self._debug('Preparing bootsteps.')
order = self.order = []
steps = self.steps = self.claim_steps()

self._debug('Building graph...')
for S in self._finalize_steps(steps):
step = S(parent, **kwargs)
steps[step.name] = step
order.append(step)
self._debug('New boot order: {%s}',
', '.join(s.alias for s in self.order))
for step in order:
step.include(parent)
return self

def connect_with(self, other):
self.graph.adjacent.update(other.graph.adjacent)
self.graph.add_edge(type(other.order[0]), type(self.order[-1]))

def __getitem__(self, name):
return self.steps[name]

def _find_last(self):
return next((C for C in values(self.steps) if C.last), None)

def _firstpass(self, steps):
for step in values(steps):
step.requires = [symbol_by_name(dep) for dep in step.requires]
stream = deque(step.requires for step in values(steps))
while stream:
for node in stream.popleft():
node = symbol_by_name(node)
if node.name not in self.steps:
steps[node.name] = node
stream.append(node.requires)

def _finalize_steps(self, steps):
last = self._find_last()
self._firstpass(steps)
it = ((C, C.requires) for C in values(steps))
G = self.graph = DependencyGraph(
it, formatter=self.GraphFormatter(root=last),
)
if last:
for obj in G:
if obj != last:
G.add_edge(last, obj)
try:
return G.topsort()
except KeyError as exc:
raise KeyError('unknown bootstep: %s' % exc)

def claim_steps(self):
return dict(self.load_step(step) for step in self._all_steps())

def _all_steps(self):
return self.types | self.app.steps[self.name.lower()]

def load_step(self, step):
step = symbol_by_name(step)
return step.name, step

def _debug(self, msg, *args):
return debug(_pre(self, msg), *args)

@property
def alias(self):
return _label(self)


class StepType(type):
"""Metaclass for steps."""

def __new__(cls, name, bases, attrs):
module = attrs.get('__module__')
qname = '{0}.{1}'.format(module, name) if module else name
attrs.update(
__qualname__=qname,
name=attrs.get('name') or qname,
)
return super(StepType, cls).__new__(cls, name, bases, attrs)

def __str__(self):
return self.name

def __repr__(self):
return 'step:{0.name}{{{0.requires!r}}}'.format(self)


@with_metaclass(StepType)
class Step(object):
"""A Bootstep.

The :meth:`__init__` method is called when the step
is bound to a parent object, and can as such be used
to initialize attributes in the parent object at
parent instantiation-time.

"""

#: Optional step name, will use qualname if not specified.
name = None

#: Optional short name used for graph outputs and in logs.
label = None

#: Set this to true if the step is enabled based on some condition.
conditional = False

    #: List of other steps that must be started before this step.
#: Note that all dependencies must be in the same blueprint.
requires = ()

    #: This flag is reserved for the worker's Consumer,
#: since it is required to always be started last.
#: There can only be one object marked last
#: in every blueprint.
last = False

#: This provides the default for :meth:`include_if`.
enabled = True

def __init__(self, parent, **kwargs):
pass

def include_if(self, parent):
"""An optional predicate that decides whether this
step should be created."""
return self.enabled

def instantiate(self, name, *args, **kwargs):
return instantiate(name, *args, **kwargs)

def _should_include(self, parent):
if self.include_if(parent):
return True, self.create(parent)
return False, None

def include(self, parent):
return self._should_include(parent)[0]

def create(self, parent):
"""Create the step."""
pass

def __repr__(self):
return '<step: {0.alias}>'.format(self)

@property
def alias(self):
return self.label or _label(self)

def info(self, obj):
pass


class StartStopStep(Step):

#: Optional obj created by the :meth:`create` method.
#: This is used by :class:`StartStopStep` to keep the
#: original service object.
obj = None

def start(self, parent):
if self.obj:
return self.obj.start()

def stop(self, parent):
if self.obj:
return self.obj.stop()

def close(self, parent):
pass

def terminate(self, parent):
if self.obj:
return getattr(self.obj, 'terminate', self.obj.stop)()

def include(self, parent):
inc, ret = self._should_include(parent)
if inc:
self.obj = ret
parent.steps.append(self)
return inc


class ConsumerStep(StartStopStep):
requires = ('celery.worker.consumer:Connection', )
consumers = None

def get_consumers(self, channel):
raise NotImplementedError('missing get_consumers')

def start(self, c):
channel = c.connection.channel()
self.consumers = self.get_consumers(channel)
for consumer in self.consumers or []:
consumer.consume()

def stop(self, c):
self._close(c, True)

def shutdown(self, c):
self._close(c, False)

def _close(self, c, cancel_consumers=True):
channels = set()
for consumer in self.consumers or []:
if cancel_consumers:
ignore_errors(c.connection, consumer.cancel)
if consumer.channel:
channels.add(consumer.channel)
for channel in channels:
ignore_errors(c.connection, channel.close)
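
The classes above make up the public bootsteps API. As a rough sketch of how they are used (adapted in spirit from the Celery 3.1 extending guide; the queue name and broker URL are hypothetical), a custom ConsumerStep can be registered on an app so the worker consumes from an extra queue:

from celery import Celery, bootsteps
from kombu import Consumer, Exchange, Queue

my_queue = Queue('custom', Exchange('custom'), 'custom')   # hypothetical queue

class MyConsumerStep(bootsteps.ConsumerStep):

    def get_consumers(self, channel):
        return [Consumer(channel,
                         queues=[my_queue],
                         callbacks=[self.handle_message],
                         accept=['json'])]

    def handle_message(self, body, message):
        print('Received message: {0!r}'.format(body))
        message.ack()

app = Celery(broker='amqp://')
app.steps['consumer'].add(MyConsumerStep)   # picked up by the consumer Blueprint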

+ 0  - 698  thesisenv/lib/python3.6/site-packages/celery/canvas.py

# -*- coding: utf-8 -*-
"""
celery.canvas
~~~~~~~~~~~~~

Composing task workflows.

Documentation for some of these types is in :mod:`celery`.
You should import these from :mod:`celery` and not this module.


"""
from __future__ import absolute_import

from collections import MutableSequence
from copy import deepcopy
from functools import partial as _partial, reduce
from operator import itemgetter
from itertools import chain as _chain

from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid

from celery._state import current_app
from celery.utils.functional import (
maybe_list, is_list, regen,
chunks as _chunks,
)
from celery.utils.text import truncate

__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks',
'group', 'chord', 'signature', 'maybe_signature']


class _getitem_property(object):
"""Attribute -> dict key descriptor.

The target object must support ``__getitem__``,
and optionally ``__setitem__``.

Example:

>>> from collections import defaultdict

>>> class Me(dict):
... deep = defaultdict(dict)
...
... foo = _getitem_property('foo')
... deep_thing = _getitem_property('deep.thing')


>>> me = Me()
>>> me.foo
None

>>> me.foo = 10
>>> me.foo
10
>>> me['foo']
10

>>> me.deep_thing = 42
>>> me.deep_thing
42
>>> me.deep
defaultdict(<type 'dict'>, {'thing': 42})

"""

def __init__(self, keypath):
path, _, self.key = keypath.rpartition('.')
self.path = path.split('.') if path else None

def _path(self, obj):
return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path
else obj)

def __get__(self, obj, type=None):
if obj is None:
return type
return self._path(obj).get(self.key)

def __set__(self, obj, value):
self._path(obj)[self.key] = value


def maybe_unroll_group(g):
"""Unroll group with only one member."""
# Issue #1656
try:
size = len(g.tasks)
except TypeError:
try:
size = g.tasks.__length_hint__()
except (AttributeError, TypeError):
pass
else:
return list(g.tasks)[0] if size == 1 else g
else:
return g.tasks[0] if size == 1 else g


def _upgrade(fields, sig):
"""Used by custom signatures in .from_dict, to keep common fields."""
sig.update(chord_size=fields.get('chord_size'))
return sig


class Signature(dict):
"""Class that wraps the arguments and execution options
for a single task invocation.

Used as the parts in a :class:`group` and other constructs,
or to pass tasks around as callbacks while being compatible
with serializers with a strict type subset.

:param task: Either a task class/instance, or the name of a task.
:keyword args: Positional arguments to apply.
:keyword kwargs: Keyword arguments to apply.
:keyword options: Additional options to :meth:`Task.apply_async`.

Note that if the first argument is a :class:`dict`, the other
arguments will be ignored and the values in the dict will be used
instead.

>>> s = signature('tasks.add', args=(2, 2))
>>> signature(s)
{'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}

"""
TYPES = {}
_app = _type = None

@classmethod
def register_type(cls, subclass, name=None):
cls.TYPES[name or subclass.__name__] = subclass
return subclass

@classmethod
def from_dict(self, d, app=None):
typ = d.get('subtask_type')
if typ:
return self.TYPES[typ].from_dict(kwdict(d), app=app)
return Signature(d, app=app)

def __init__(self, task=None, args=None, kwargs=None, options=None,
type=None, subtask_type=None, immutable=False,
app=None, **ex):
self._app = app
init = dict.__init__

if isinstance(task, dict):
return init(self, task) # works like dict(d)

# Also supports using task class/instance instead of string name.
try:
task_name = task.name
except AttributeError:
task_name = task
else:
self._type = task

init(self,
task=task_name, args=tuple(args or ()),
kwargs=kwargs or {},
options=dict(options or {}, **ex),
subtask_type=subtask_type,
immutable=immutable,
chord_size=None)

def __call__(self, *partial_args, **partial_kwargs):
args, kwargs, _ = self._merge(partial_args, partial_kwargs, None)
return self.type(*args, **kwargs)

def delay(self, *partial_args, **partial_kwargs):
return self.apply_async(partial_args, partial_kwargs)

def apply(self, args=(), kwargs={}, **options):
"""Apply this task locally."""
# For callbacks: extra args are prepended to the stored args.
args, kwargs, options = self._merge(args, kwargs, options)
return self.type.apply(args, kwargs, **options)

def _merge(self, args=(), kwargs={}, options={}):
if self.immutable:
return (self.args, self.kwargs,
dict(self.options, **options) if options else self.options)
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
dict(self.options, **options) if options else self.options)

def clone(self, args=(), kwargs={}, app=None, **opts):
        # need to deepcopy options so that origin links etc. are not modified.
if args or kwargs or opts:
args, kwargs, opts = self._merge(args, kwargs, opts)
else:
args, kwargs, opts = self.args, self.kwargs, self.options
s = Signature.from_dict({'task': self.task, 'args': tuple(args),
'kwargs': kwargs, 'options': deepcopy(opts),
'subtask_type': self.subtask_type,
'chord_size': self.chord_size,
'immutable': self.immutable},
app=app or self._app)
s._type = self._type
return s
partial = clone

def freeze(self, _id=None, group_id=None, chord=None):
opts = self.options
try:
tid = opts['task_id']
except KeyError:
tid = opts['task_id'] = _id or uuid()
if 'reply_to' not in opts:
opts['reply_to'] = self.app.oid
if group_id:
opts['group_id'] = group_id
if chord:
opts['chord'] = chord
return self.app.AsyncResult(tid)
_freeze = freeze

def replace(self, args=None, kwargs=None, options=None):
s = self.clone()
if args is not None:
s.args = args
if kwargs is not None:
s.kwargs = kwargs
if options is not None:
s.options = options
return s

def set(self, immutable=None, **options):
if immutable is not None:
self.set_immutable(immutable)
self.options.update(options)
return self

def set_immutable(self, immutable):
self.immutable = immutable

def apply_async(self, args=(), kwargs={}, **options):
try:
_apply = self._apply_async
except IndexError: # no tasks for chain, etc to find type
return
# For callbacks: extra args are prepended to the stored args.
if args or kwargs or options:
args, kwargs, options = self._merge(args, kwargs, options)
else:
args, kwargs, options = self.args, self.kwargs, self.options
return _apply(args, kwargs, **options)

def append_to_list_option(self, key, value):
items = self.options.setdefault(key, [])
if not isinstance(items, MutableSequence):
items = self.options[key] = [items]
if value not in items:
items.append(value)
return value

def link(self, callback):
return self.append_to_list_option('link', callback)

def link_error(self, errback):
return self.append_to_list_option('link_error', errback)

def flatten_links(self):
return list(_chain.from_iterable(_chain(
[[self]],
(link.flatten_links()
for link in maybe_list(self.options.get('link')) or [])
)))

def __or__(self, other):
if isinstance(other, group):
other = maybe_unroll_group(other)
if not isinstance(self, chain) and isinstance(other, chain):
return chain((self, ) + other.tasks, app=self._app)
elif isinstance(other, chain):
return chain(*self.tasks + other.tasks, app=self._app)
elif isinstance(other, Signature):
if isinstance(self, chain):
return chain(*self.tasks + (other, ), app=self._app)
return chain(self, other, app=self._app)
return NotImplemented

def __deepcopy__(self, memo):
memo[id(self)] = self
return dict(self)

def __invert__(self):
return self.apply_async().get()

def __reduce__(self):
# for serialization, the task type is lazily loaded,
# and not stored in the dict itself.
return subtask, (dict(self), )

def reprcall(self, *args, **kwargs):
args, kwargs, _ = self._merge(args, kwargs, {})
return reprcall(self['task'], args, kwargs)

def election(self):
type = self.type
app = type.app
tid = self.options.get('task_id') or uuid()

with app.producer_or_acquire(None) as P:
props = type.backend.on_task_call(P, tid)
app.control.election(tid, 'task', self.clone(task_id=tid, **props),
connection=P.connection)
return type.AsyncResult(tid)

def __repr__(self):
return self.reprcall()

@cached_property
def type(self):
return self._type or self.app.tasks[self['task']]

@cached_property
def app(self):
return self._app or current_app

@cached_property
def AsyncResult(self):
try:
return self.type.AsyncResult
except KeyError: # task not registered
return self.app.AsyncResult

@cached_property
def _apply_async(self):
try:
return self.type.apply_async
except KeyError:
return _partial(self.app.send_task, self['task'])
id = _getitem_property('options.task_id')
task = _getitem_property('task')
args = _getitem_property('args')
kwargs = _getitem_property('kwargs')
options = _getitem_property('options')
subtask_type = _getitem_property('subtask_type')
chord_size = _getitem_property('chord_size')
immutable = _getitem_property('immutable')


@Signature.register_type
class chain(Signature):

def __init__(self, *tasks, **options):
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
else tasks)
Signature.__init__(
self, 'celery.chain', (), {'tasks': tasks}, **options
)
self.tasks = tasks
self.subtask_type = 'chain'

def __call__(self, *args, **kwargs):
if self.tasks:
return self.apply_async(args, kwargs)

@classmethod
def from_dict(self, d, app=None):
tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']]
if d['args'] and tasks:
# partial args passed on to first task in chain (Issue #1057).
tasks[0]['args'] = tasks[0]._merge(d['args'])[0]
return _upgrade(d, chain(*tasks, app=app, **d['options']))

@property
def type(self):
try:
return self._type or self.tasks[0].type.app.tasks['celery.chain']
except KeyError:
return self.app.tasks['celery.chain']

def __repr__(self):
return ' | '.join(repr(t) for t in self.tasks)


class _basemap(Signature):
_task_name = None
_unpack_args = itemgetter('task', 'it')

def __init__(self, task, it, **options):
Signature.__init__(
self, self._task_name, (),
{'task': task, 'it': regen(it)}, immutable=True, **options
)

def apply_async(self, args=(), kwargs={}, **opts):
# need to evaluate generators
task, it = self._unpack_args(self.kwargs)
return self.type.apply_async(
(), {'task': task, 'it': list(it)}, **opts
)

@classmethod
def from_dict(cls, d, app=None):
return _upgrade(
d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']),
)


@Signature.register_type
class xmap(_basemap):
_task_name = 'celery.map'

def __repr__(self):
task, it = self._unpack_args(self.kwargs)
return '[{0}(x) for x in {1}]'.format(task.task,
truncate(repr(it), 100))


@Signature.register_type
class xstarmap(_basemap):
_task_name = 'celery.starmap'

def __repr__(self):
task, it = self._unpack_args(self.kwargs)
return '[{0}(*x) for x in {1}]'.format(task.task,
truncate(repr(it), 100))


@Signature.register_type
class chunks(Signature):
_unpack_args = itemgetter('task', 'it', 'n')

def __init__(self, task, it, n, **options):
Signature.__init__(
self, 'celery.chunks', (),
{'task': task, 'it': regen(it), 'n': n},
immutable=True, **options
)

@classmethod
def from_dict(self, d, app=None):
return _upgrade(
d, chunks(*self._unpack_args(
d['kwargs']), app=app, **d['options']),
)

def apply_async(self, args=(), kwargs={}, **opts):
return self.group().apply_async(args, kwargs, **opts)

def __call__(self, **options):
return self.group()(**options)

def group(self):
# need to evaluate generators
task, it, n = self._unpack_args(self.kwargs)
return group((xstarmap(task, part, app=self._app)
for part in _chunks(iter(it), n)),
app=self._app)

@classmethod
def apply_chunks(cls, task, it, n, app=None):
return cls(task, it, n, app=app)()


def _maybe_group(tasks):
if isinstance(tasks, group):
tasks = list(tasks.tasks)
elif isinstance(tasks, Signature):
tasks = [tasks]
else:
tasks = regen(tasks)
return tasks


def _maybe_clone(tasks, app):
return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
for s in tasks]


@Signature.register_type
class group(Signature):

def __init__(self, *tasks, **options):
if len(tasks) == 1:
tasks = _maybe_group(tasks[0])
Signature.__init__(
self, 'celery.group', (), {'tasks': tasks}, **options
)
self.tasks, self.subtask_type = tasks, 'group'

@classmethod
def from_dict(self, d, app=None):
tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']]
if d['args'] and tasks:
# partial args passed on to all tasks in the group (Issue #1057).
for task in tasks:
task['args'] = task._merge(d['args'])[0]
return _upgrade(d, group(tasks, app=app, **kwdict(d['options'])))

def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options):
tasks = _maybe_clone(self.tasks, app=self._app)
if not tasks:
return self.freeze()
type = self.type
return type(*type.prepare(dict(self.options, **options), tasks, args),
add_to_parent=add_to_parent)

def set_immutable(self, immutable):
for task in self.tasks:
task.set_immutable(immutable)

def link(self, sig):
# Simply link to first task
sig = sig.clone().set(immutable=True)
return self.tasks[0].link(sig)

def link_error(self, sig):
sig = sig.clone().set(immutable=True)
return self.tasks[0].link_error(sig)

def apply(self, *args, **kwargs):
if not self.tasks:
return self.freeze() # empty group returns GroupResult
return Signature.apply(self, *args, **kwargs)

def __call__(self, *partial_args, **options):
return self.apply_async(partial_args, **options)

def freeze(self, _id=None, group_id=None, chord=None):
opts = self.options
try:
gid = opts['task_id']
except KeyError:
gid = opts['task_id'] = uuid()
if group_id:
opts['group_id'] = group_id
if chord:
opts['chord'] = group_id
new_tasks, results = [], []
for task in self.tasks:
task = maybe_signature(task, app=self._app).clone()
results.append(task.freeze(group_id=group_id, chord=chord))
new_tasks.append(task)
self.tasks = self.kwargs['tasks'] = new_tasks
return self.app.GroupResult(gid, results)
_freeze = freeze

def skew(self, start=1.0, stop=None, step=1.0):
it = fxrange(start, stop, step, repeatlast=True)
for task in self.tasks:
task.set(countdown=next(it))
return self

def __iter__(self):
return iter(self.tasks)

def __repr__(self):
return repr(self.tasks)

@property
def app(self):
return self._app or (self.tasks[0].app if self.tasks else current_app)

@property
def type(self):
if self._type:
return self._type
# taking the app from the first task in the list, there may be a
# better solution for this, e.g. to consolidate tasks with the same
# app and apply them in batches.
return self.app.tasks[self['task']]


@Signature.register_type
class chord(Signature):

def __init__(self, header, body=None, task='celery.chord',
args=(), kwargs={}, **options):
Signature.__init__(
self, task, args,
dict(kwargs, header=_maybe_group(header),
body=maybe_signature(body, app=self._app)), **options
)
self.subtask_type = 'chord'

def apply(self, args=(), kwargs={}, **options):
# For callbacks: extra args are prepended to the stored args.
args, kwargs, options = self._merge(args, kwargs, options)
return self.type.apply(args, kwargs, **options)

def freeze(self, _id=None, group_id=None, chord=None):
return self.body.freeze(_id, group_id=group_id, chord=chord)

@classmethod
def from_dict(self, d, app=None):
args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
return _upgrade(d, self(*args, app=app, **kwdict(d)))

@staticmethod
def _unpack_args(header=None, body=None, **kwargs):
# Python signatures are better at extracting keys from dicts
# than manually popping things off.
return (header, body), kwargs

@property
def app(self):
# we will be able to fix this mess in 3.2 when we no longer
# require an actual task implementation for chord/group
if self._app:
return self._app
app = None if self.body is None else self.body.app
if app is None:
try:
app = self.tasks[0].app
except IndexError:
app = None
return app if app is not None else current_app

@property
def type(self):
if self._type:
return self._type
return self.app.tasks['celery.chord']

def delay(self, *partial_args, **partial_kwargs):
# There's no partial_kwargs for chord.
return self.apply_async(partial_args)

def apply_async(self, args=(), kwargs={}, task_id=None,
producer=None, publisher=None, connection=None,
router=None, result_cls=None, **options):
args = (tuple(args) + tuple(self.args)
if args and not self.immutable else self.args)
body = kwargs.get('body') or self.kwargs['body']
kwargs = dict(self.kwargs, **kwargs)
body = body.clone(**options)

_chord = self.type
if _chord.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, task_id=task_id, **options)
res = body.freeze(task_id)
parent = _chord(self.tasks, body, args, **options)
res.parent = parent
return res

def __call__(self, body=None, **options):
return self.apply_async(
(), {'body': body} if body else {}, **options)

def clone(self, *args, **kwargs):
s = Signature.clone(self, *args, **kwargs)
# need to make copy of body
try:
s.kwargs['body'] = s.kwargs['body'].clone()
except (AttributeError, KeyError):
pass
return s

def link(self, callback):
self.body.link(callback)
return callback

def link_error(self, errback):
self.body.link_error(errback)
return errback

def set_immutable(self, immutable):
# changes mutability of header only, not callback.
for task in self.tasks:
task.set_immutable(immutable)

def __repr__(self):
if self.body:
return self.body.reprcall(self.tasks)
return '<chord without body: {0.tasks!r}>'.format(self)

tasks = _getitem_property('kwargs.header')
body = _getitem_property('kwargs.body')


def signature(varies, args=(), kwargs={}, options={}, app=None, **kw):
if isinstance(varies, dict):
if isinstance(varies, Signature):
return varies.clone(app=app)
return Signature.from_dict(varies, app=app)
return Signature(varies, args, kwargs, options, app=app, **kw)
subtask = signature # XXX compat


def maybe_signature(d, app=None):
if d is not None:
if isinstance(d, dict):
if not isinstance(d, Signature):
return signature(d, app=app)
elif isinstance(d, list):
return [maybe_signature(s, app=app) for s in d]
if app is not None:
d._app = app
return d
maybe_subtask = maybe_signature # XXX compat
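
A short sketch of the canvas primitives defined above, assuming a hypothetical `tasks` module that registers `tasks.add(x, y)` and `tasks.tsum(numbers)`:

from celery import chain, chord, group, signature

# A Signature wraps a task name plus arguments and execution options.
add_2_2 = signature('tasks.add', args=(2, 2))            # like tasks.add.s(2, 2)

# chain: each result is passed as an argument to the next signature.
workflow = chain(add_2_2, signature('tasks.add', args=(4,)))

# group: signatures applied in parallel.
many = group(signature('tasks.add', args=(i, i)) for i in range(10))

# chord: a group whose results are fed to a single callback ("body").
summed = chord(many, signature('tasks.tsum'))

# Each construct is itself a Signature, so once a broker and worker are
# available it can be sent with .delay() or .apply_async(), e.g.:
#   result = summed.apply_async()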

+ 0  - 29  thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py

# -*- coding: utf-8 -*-
"""
celery.concurrency
~~~~~~~~~~~~~~~~~~

Pool implementation abstract factory, and alias definitions.

"""
from __future__ import absolute_import

# Import from kombu directly as it's used
# early in the import stage, where celery.utils loads
# too much (e.g. for eventlet patching)
from kombu.utils import symbol_by_name

__all__ = ['get_implementation']

ALIASES = {
'prefork': 'celery.concurrency.prefork:TaskPool',
'eventlet': 'celery.concurrency.eventlet:TaskPool',
'gevent': 'celery.concurrency.gevent:TaskPool',
'threads': 'celery.concurrency.threads:TaskPool',
'solo': 'celery.concurrency.solo:TaskPool',
'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias
}


def get_implementation(cls):
return symbol_by_name(cls, ALIASES)
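
For illustration, the factory resolves both the aliases above and dotted paths, which is also how the worker's -P/--pool option is interpreted (the custom path below is a hypothetical example):

from celery.concurrency import get_implementation

SoloPool = get_implementation('solo')        # alias from ALIASES above
PreforkPool = get_implementation('prefork')

# A custom pool can be named with a 'module:attribute' path instead:
# CustomPool = get_implementation('myproject.pools:TaskPool')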

+ 0  - 1270  thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py  (file diff suppressed because it is too large)


+ 0  - 171  thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.base
~~~~~~~~~~~~~~~~~~~~~~~

TaskPool interface.

"""
from __future__ import absolute_import

import logging
import os
import sys

from billiard.einfo import ExceptionInfo
from billiard.exceptions import WorkerLostError
from kombu.utils.encoding import safe_repr

from celery.exceptions import WorkerShutdown, WorkerTerminate
from celery.five import monotonic, reraise
from celery.utils import timer2
from celery.utils.text import truncate
from celery.utils.log import get_logger

__all__ = ['BasePool', 'apply_target']

logger = get_logger('celery.pool')


def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, pid=None, getpid=os.getpid,
propagate=(), monotonic=monotonic, **_):
if accept_callback:
accept_callback(pid or getpid(), monotonic())
try:
ret = target(*args, **kwargs)
except propagate:
raise
except Exception:
raise
except (WorkerShutdown, WorkerTerminate):
raise
except BaseException as exc:
try:
reraise(WorkerLostError, WorkerLostError(repr(exc)),
sys.exc_info()[2])
except WorkerLostError:
callback(ExceptionInfo())
else:
callback(ret)


class BasePool(object):
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3

Timer = timer2.Timer

#: set to true if the pool can be shutdown from within
#: a signal handler.
signal_safe = True

#: set to true if pool uses greenlets.
is_green = False

_state = None
_pool = None

#: only used by multiprocessing pool
uses_semaphore = False

task_join_will_block = True

def __init__(self, limit=None, putlocks=True,
forking_enable=True, callbacks_propagate=(), **options):
self.limit = limit
self.putlocks = putlocks
self.options = options
self.forking_enable = forking_enable
self.callbacks_propagate = callbacks_propagate
self._does_debug = logger.isEnabledFor(logging.DEBUG)

def on_start(self):
pass

def did_start_ok(self):
return True

def flush(self):
pass

def on_stop(self):
pass

def register_with_event_loop(self, loop):
pass

def on_apply(self, *args, **kwargs):
pass

def on_terminate(self):
pass

def on_soft_timeout(self, job):
pass

def on_hard_timeout(self, job):
pass

def maintain_pool(self, *args, **kwargs):
pass

def terminate_job(self, pid, signal=None):
raise NotImplementedError(
            '{0} does not implement terminate_job'.format(type(self)))

def restart(self):
raise NotImplementedError(
'{0} does not implement restart'.format(type(self)))

def stop(self):
self.on_stop()
self._state = self.TERMINATE

def terminate(self):
self._state = self.TERMINATE
self.on_terminate()

def start(self):
self.on_start()
self._state = self.RUN

def close(self):
self._state = self.CLOSE
self.on_close()

def on_close(self):
pass

def apply_async(self, target, args=[], kwargs={}, **options):
"""Equivalent of the :func:`apply` built-in function.

Callbacks should optimally return as soon as possible since
otherwise the thread which handles the result will get blocked.

"""
if self._does_debug:
logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
target, truncate(safe_repr(args), 1024),
truncate(safe_repr(kwargs), 1024))

return self.on_apply(target, args, kwargs,
waitforslot=self.putlocks,
callbacks_propagate=self.callbacks_propagate,
**options)

def _get_info(self):
return {}

@property
def info(self):
return self._get_info()

@property
def active(self):
return self._state == self.RUN

@property
def num_processes(self):
return self.limit

+ 0  - 161  thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.eventlet
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Eventlet pool implementation.

"""
from __future__ import absolute_import

import sys

from time import time

__all__ = ['TaskPool']

W_RACE = """\
Celery module with %s imported before eventlet patched\
"""
RACE_MODS = ('billiard.', 'celery.', 'kombu.')


#: Warn if we couldn't patch early enough,
#: and thread/socket-dependent celery modules have already been loaded.
for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)):
for side in ('thread', 'threading', 'socket'): # pragma: no cover
if getattr(mod, side, None):
import warnings
warnings.warn(RuntimeWarning(W_RACE % side))


from celery import signals # noqa
from celery.utils import timer2 # noqa

from . import base # noqa


def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, getpid=None):
return base.apply_target(target, args, kwargs, callback, accept_callback,
pid=getpid())


class Schedule(timer2.Schedule):

def __init__(self, *args, **kwargs):
from eventlet.greenthread import spawn_after
from greenlet import GreenletExit
super(Schedule, self).__init__(*args, **kwargs)

self.GreenletExit = GreenletExit
self._spawn_after = spawn_after
self._queue = set()

def _enter(self, eta, priority, entry):
secs = max(eta - time(), 0)
g = self._spawn_after(secs, entry)
self._queue.add(g)
g.link(self._entry_exit, entry)
g.entry = entry
g.eta = eta
g.priority = priority
g.canceled = False
return g

def _entry_exit(self, g, entry):
try:
try:
g.wait()
except self.GreenletExit:
entry.cancel()
g.canceled = True
finally:
self._queue.discard(g)

def clear(self):
queue = self._queue
while queue:
try:
queue.pop().cancel()
except (KeyError, self.GreenletExit):
pass

@property
def queue(self):
return self._queue


class Timer(timer2.Timer):
Schedule = Schedule

def ensure_started(self):
pass

def stop(self):
self.schedule.clear()

def cancel(self, tref):
try:
tref.cancel()
except self.schedule.GreenletExit:
pass

def start(self):
pass


class TaskPool(base.BasePool):
Timer = Timer

signal_safe = False
is_green = True
task_join_will_block = False

def __init__(self, *args, **kwargs):
from eventlet import greenthread
from eventlet.greenpool import GreenPool
self.Pool = GreenPool
self.getcurrent = greenthread.getcurrent
self.getpid = lambda: id(greenthread.getcurrent())
self.spawn_n = greenthread.spawn_n

super(TaskPool, self).__init__(*args, **kwargs)

def on_start(self):
self._pool = self.Pool(self.limit)
signals.eventlet_pool_started.send(sender=self)
self._quick_put = self._pool.spawn_n
self._quick_apply_sig = signals.eventlet_pool_apply.send

def on_stop(self):
signals.eventlet_pool_preshutdown.send(sender=self)
if self._pool is not None:
self._pool.waitall()
signals.eventlet_pool_postshutdown.send(sender=self)

def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
self._quick_apply_sig(
sender=self, target=target, args=args, kwargs=kwargs,
)
self._quick_put(apply_target, target, args, kwargs,
callback, accept_callback,
self.getpid)

def grow(self, n=1):
limit = self.limit + n
self._pool.resize(limit)
self.limit = limit

def shrink(self, n=1):
limit = self.limit - n
self._pool.resize(limit)
self.limit = limit

def _get_info(self):
return {
'max-concurrency': self.limit,
'free-threads': self._pool.free(),
'running-threads': self._pool.running(),
}

+ 0  - 136  thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.gevent
~~~~~~~~~~~~~~~~~~~~~~~~~

gevent pool implementation.

"""
from __future__ import absolute_import

from time import time

try:
from gevent import Timeout
except ImportError: # pragma: no cover
Timeout = None # noqa

from celery.utils import timer2

from .base import apply_target, BasePool

__all__ = ['TaskPool']


def apply_timeout(target, args=(), kwargs={}, callback=None,
accept_callback=None, pid=None, timeout=None,
timeout_callback=None, Timeout=Timeout,
apply_target=apply_target, **rest):
try:
with Timeout(timeout):
return apply_target(target, args, kwargs, callback,
accept_callback, pid,
propagate=(Timeout, ), **rest)
except Timeout:
return timeout_callback(False, timeout)


class Schedule(timer2.Schedule):

def __init__(self, *args, **kwargs):
from gevent.greenlet import Greenlet, GreenletExit

class _Greenlet(Greenlet):
cancel = Greenlet.kill

self._Greenlet = _Greenlet
self._GreenletExit = GreenletExit
super(Schedule, self).__init__(*args, **kwargs)
self._queue = set()

def _enter(self, eta, priority, entry):
secs = max(eta - time(), 0)
g = self._Greenlet.spawn_later(secs, entry)
self._queue.add(g)
g.link(self._entry_exit)
g.entry = entry
g.eta = eta
g.priority = priority
g.canceled = False
return g

def _entry_exit(self, g):
try:
g.kill()
finally:
self._queue.discard(g)

def clear(self):
queue = self._queue
while queue:
try:
queue.pop().kill()
except KeyError:
pass

@property
def queue(self):
return self._queue


class Timer(timer2.Timer):
Schedule = Schedule

def ensure_started(self):
pass

def stop(self):
self.schedule.clear()

def start(self):
pass


class TaskPool(BasePool):
Timer = Timer

signal_safe = False
is_green = True
task_join_will_block = False

def __init__(self, *args, **kwargs):
from gevent import spawn_raw
from gevent.pool import Pool
self.Pool = Pool
self.spawn_n = spawn_raw
self.timeout = kwargs.get('timeout')
super(TaskPool, self).__init__(*args, **kwargs)

def on_start(self):
self._pool = self.Pool(self.limit)
self._quick_put = self._pool.spawn

def on_stop(self):
if self._pool is not None:
self._pool.join()

def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, timeout=None,
timeout_callback=None, **_):
timeout = self.timeout if timeout is None else timeout
return self._quick_put(apply_timeout if timeout else apply_target,
target, args, kwargs, callback, accept_callback,
timeout=timeout,
timeout_callback=timeout_callback)

def grow(self, n=1):
self._pool._semaphore.counter += n
self._pool.size += n

def shrink(self, n=1):
self._pool._semaphore.counter -= n
self._pool.size -= n

@property
def num_processes(self):
return len(self._pool)

+ 0  - 178  thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.prefork
~~~~~~~~~~~~~~~~~~~~~~~~~~

Pool implementation using :mod:`multiprocessing`.

"""
from __future__ import absolute_import

import os

from billiard import forking_enable
from billiard.pool import RUN, CLOSE, Pool as BlockingPool

from celery import platforms
from celery import signals
from celery._state import set_default_app, _set_task_join_will_block
from celery.app import trace
from celery.concurrency.base import BasePool
from celery.five import items
from celery.utils.functional import noop
from celery.utils.log import get_logger

from .asynpool import AsynPool

__all__ = ['TaskPool', 'process_initializer', 'process_destructor']

#: List of signals to reset when a child process starts.
WORKER_SIGRESET = frozenset(['SIGTERM',
'SIGHUP',
'SIGTTIN',
'SIGTTOU',
'SIGUSR1'])

#: List of signals to ignore when a child process starts.
WORKER_SIGIGNORE = frozenset(['SIGINT'])

logger = get_logger(__name__)
warning, debug = logger.warning, logger.debug


def process_initializer(app, hostname):
"""Pool child process initializer.

This will initialize a child pool process to ensure the correct
    app instance is used and things like
    logging work.

"""
_set_task_join_will_block(True)
platforms.signals.reset(*WORKER_SIGRESET)
platforms.signals.ignore(*WORKER_SIGIGNORE)
platforms.set_mp_process_title('celeryd', hostname=hostname)
# This is for Windows and other platforms not supporting
# fork(). Note that init_worker makes sure it's only
# run once per process.
app.loader.init_worker()
app.loader.init_worker_process()
logfile = os.environ.get('CELERY_LOG_FILE') or None
if logfile and '%i' in logfile.lower():
# logfile path will differ so need to set up logging again.
app.log.already_setup = False
app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
logfile,
bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')),
hostname=hostname)
if os.environ.get('FORKED_BY_MULTIPROCESSING'):
# pool did execv after fork
trace.setup_worker_optimizations(app)
else:
app.set_current()
set_default_app(app)
app.finalize()
trace._tasks = app._tasks # enables fast_trace_task optimization.
# rebuild execution handler for all tasks.
from celery.app.trace import build_tracer
for name, task in items(app.tasks):
task.__trace__ = build_tracer(name, task, app.loader, hostname,
app=app)
from celery.worker import state as worker_state
worker_state.reset_state()
signals.worker_process_init.send(sender=None)


def process_destructor(pid, exitcode):
"""Pool child process destructor

Dispatch the :signal:`worker_process_shutdown` signal.

"""
signals.worker_process_shutdown.send(
sender=None, pid=pid, exitcode=exitcode,
)


class TaskPool(BasePool):
"""Multiprocessing Pool implementation."""
Pool = AsynPool
BlockingPool = BlockingPool

uses_semaphore = True
write_stats = None

def on_start(self):
"""Run the task pool.

Will pre-fork all workers so they're ready to accept tasks.

"""
forking_enable(self.forking_enable)
Pool = (self.BlockingPool if self.options.get('threads', True)
else self.Pool)
P = self._pool = Pool(processes=self.limit,
initializer=process_initializer,
on_process_exit=process_destructor,
synack=False,
**self.options)

# Create proxy methods
self.on_apply = P.apply_async
self.maintain_pool = P.maintain_pool
self.terminate_job = P.terminate_job
self.grow = P.grow
self.shrink = P.shrink
self.flush = getattr(P, 'flush', None) # FIXME add to billiard

def restart(self):
self._pool.restart()
self._pool.apply_async(noop)

def did_start_ok(self):
return self._pool.did_start_ok()

def register_with_event_loop(self, loop):
try:
reg = self._pool.register_with_event_loop
except AttributeError:
return
return reg(loop)

def on_stop(self):
"""Gracefully stop the pool."""
if self._pool is not None and self._pool._state in (RUN, CLOSE):
self._pool.close()
self._pool.join()
self._pool = None

def on_terminate(self):
"""Force terminate the pool."""
if self._pool is not None:
self._pool.terminate()
self._pool = None

def on_close(self):
if self._pool is not None and self._pool._state == RUN:
self._pool.close()

def _get_info(self):
try:
write_stats = self._pool.human_write_stats
except AttributeError:
def write_stats():
return 'N/A' # only supported by asynpool
return {
'max-concurrency': self.limit,
'processes': [p.pid for p in self._pool._pool],
'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A',
'put-guarded-by-semaphore': self.putlocks,
'timeouts': (self._pool.soft_timeout or 0,
self._pool.timeout or 0),
'writes': write_stats()
}

@property
def num_processes(self):
return self._pool._processes
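
The pool size and child-recycling behavior used above come from the worker settings; a minimal configuration sketch using the Celery 3.1 setting names (the app name and broker URL are placeholders):

from celery import Celery

app = Celery('proj', broker='amqp://')       # hypothetical app
app.conf.update(
    CELERYD_CONCURRENCY=4,                   # number of prefork child processes
    CELERYD_MAX_TASKS_PER_CHILD=100,         # recycle a child after 100 tasks
    CELERYD_PREFETCH_MULTIPLIER=1,           # messages reserved per child
)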

+ 0  - 30  thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.solo
~~~~~~~~~~~~~~~~~~~~~~~

Single-threaded pool implementation.

"""
from __future__ import absolute_import

import os

from .base import BasePool, apply_target

__all__ = ['TaskPool']


class TaskPool(BasePool):
"""Solo task pool (blocking, inline, fast)."""

def __init__(self, *args, **kwargs):
super(TaskPool, self).__init__(*args, **kwargs)
self.on_apply = apply_target

def _get_info(self):
return {'max-concurrency': 1,
'processes': [os.getpid()],
'max-tasks-per-child': None,
'put-guarded-by-semaphore': True,
'timeouts': ()}

+ 0  - 57  thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py

# -*- coding: utf-8 -*-
"""
celery.concurrency.threads
~~~~~~~~~~~~~~~~~~~~~~~~~~

Pool implementation using threads.

"""
from __future__ import absolute_import

from celery.five import UserDict

from .base import apply_target, BasePool

__all__ = ['TaskPool']


class NullDict(UserDict):

def __setitem__(self, key, value):
pass


class TaskPool(BasePool):

def __init__(self, *args, **kwargs):
try:
import threadpool
except ImportError:
raise ImportError(
'The threaded pool requires the threadpool module.')
self.WorkRequest = threadpool.WorkRequest
self.ThreadPool = threadpool.ThreadPool
super(TaskPool, self).__init__(*args, **kwargs)

def on_start(self):
self._pool = self.ThreadPool(self.limit)
# threadpool stores all work requests until they are processed
# we don't need this dict, and it occupies way too much memory.
self._pool.workRequests = NullDict()
self._quick_put = self._pool.putRequest
self._quick_clear = self._pool._results_queue.queue.clear

def on_stop(self):
self._pool.dismissWorkers(self.limit, do_join=True)

def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
req = self.WorkRequest(apply_target, (target, args, kwargs, callback,
accept_callback))
self._quick_put(req)
# threadpool also has callback support,
# but for some reason the callback is not triggered
# before you've collected the results.
# Clear the results (if any), so it doesn't grow too large.
self._quick_clear()
return req

+ 0  - 0  thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py


+ 0  - 172  thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py

# -*- coding: utf-8 -*-
"""
=========================
Abortable tasks overview
=========================

For long-running :class:`Task`'s, it can be desirable to support
aborting during execution. Of course, these tasks should be built to
support abortion specifically.

The :class:`AbortableTask` serves as a base class for all :class:`Task`
objects that should support abortion by producers.

* Producers may invoke the :meth:`abort` method on
:class:`AbortableAsyncResult` instances, to request abortion.

* Consumers (workers) should periodically check (and honor!) the
:meth:`is_aborted` method at controlled points in their task's
:meth:`run` method. The more often, the better.

The necessary intermediate communication is dealt with by the
:class:`AbortableTask` implementation.

Usage example
-------------

In the consumer:

.. code-block:: python

from __future__ import absolute_import

from celery.contrib.abortable import AbortableTask
from celery.utils.log import get_task_logger

from proj.celery import app

    logger = get_task_logger(__name__)

@app.task(bind=True, base=AbortableTask)
def long_running_task(self):
results = []
for i in range(100):
# check after every 5 iterations...
# (or alternatively, check when some timer is due)
if not i % 5:
if self.is_aborted():
# respect aborted state, and terminate gracefully.
logger.warning('Task aborted')
return
value = do_something_expensive(i)
            results.append(value)
logger.info('Task complete')
return results

In the producer:

.. code-block:: python

from __future__ import absolute_import

import time

from proj.tasks import MyLongRunningTask

def myview(request):
# result is of type AbortableAsyncResult
result = long_running_task.delay()

# abort the task after 10 seconds
time.sleep(10)
result.abort()

After the `result.abort()` call, the task execution is not
aborted immediately. In fact, it is not guaranteed to abort at all. Keep
checking `result.state` status, or call `result.get(timeout=)` to
have it block until the task is finished.

.. note::

In order to abort tasks, there needs to be communication between the
producer and the consumer. This is currently implemented through the
database backend. Therefore, this class will only work with the
database backends.

"""
from __future__ import absolute_import

from celery import Task
from celery.result import AsyncResult

__all__ = ['AbortableAsyncResult', 'AbortableTask']


"""
Task States
-----------

.. state:: ABORTED

ABORTED
~~~~~~~

Task is aborted (typically by the producer) and should be
aborted as soon as possible.

"""
ABORTED = 'ABORTED'


class AbortableAsyncResult(AsyncResult):
"""Represents a abortable result.

Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
which sets the state of the underlying Task to `'ABORTED'`.

"""

def is_aborted(self):
"""Return :const:`True` if the task is (being) aborted."""
return self.state == ABORTED

def abort(self):
"""Set the state of the task to :const:`ABORTED`.

        Abortable tasks monitor their state at regular intervals and
        terminate execution once the state has been set to :const:`ABORTED`.

Be aware that invoking this method does not guarantee when the
task will be aborted (or even if the task will be aborted at
all).

"""
# TODO: store_result requires all four arguments to be set,
# but only status should be updated here
return self.backend.store_result(self.id, result=None,
status=ABORTED, traceback=None)


class AbortableTask(Task):
"""A celery task that serves as a base class for all :class:`Task`'s
that support aborting during execution.

All subclasses of :class:`AbortableTask` must call the
:meth:`is_aborted` method periodically and act accordingly when
the call evaluates to :const:`True`.

"""
abstract = True

def AsyncResult(self, task_id):
"""Return the accompanying AbortableAsyncResult instance."""
return AbortableAsyncResult(task_id, backend=self.backend)

def is_aborted(self, **kwargs):
"""Checks against the backend whether this
:class:`AbortableAsyncResult` is :const:`ABORTED`.

Always return :const:`False` in case the `task_id` parameter
refers to a regular (non-abortable) :class:`Task`.

Be aware that invoking this method will cause a hit in the
backend (for example a database query), so find a good balance
between calling it regularly (for responsiveness), but not too
often (for performance).

"""
task_id = kwargs.get('task_id', self.request.id)
result = self.AsyncResult(task_id)
if not isinstance(result, AbortableAsyncResult):
return False
return result.is_aborted()
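
A small producer-side sketch, reusing the `long_running_task` from the docstring example above; it only illustrates that `abort()` is a request the task itself has to honor:

import time

result = long_running_task.delay()
result.abort()                         # stores ABORTED in the result backend
while result.state not in ('SUCCESS', 'FAILURE', 'REVOKED'):
    time.sleep(1)                      # the task exits at its next is_aborted() check
print(result.state)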

+ 0  - 249  thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py

# -*- coding: utf-8 -*-
"""
celery.contrib.batches
======================

Experimental task class that buffers messages and processes them as a list.

.. warning::

For this to work you have to set
:setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where
the final multiplied value is higher than ``flush_every``.

In the future we hope to add the ability to direct batching tasks
to a channel with different QoS requirements than the task channel.

**Simple Example**

A click counter that flushes the buffer every 100 messages, and every
10 seconds. Does not do anything with the data, but can easily be modified
to store it in a database.

.. code-block:: python

# Flush after 100 messages, or 10 seconds.
@app.task(base=Batches, flush_every=100, flush_interval=10)
def count_click(requests):
from collections import Counter
count = Counter(request.kwargs['url'] for request in requests)
for url, count in count.items():
print('>>> Clicks: {0} -> {1}'.format(url, count))


Then you can ask for a click to be counted by doing::

>>> count_click.delay(url='http://example.com')

**Example returning results**

An interface to the Web of Trust API that flushes the buffer every 100
messages, and every 10 seconds.

.. code-block:: python

import requests
from urlparse import urlparse

from celery.contrib.batches import Batches

wot_api_target = 'https://api.mywot.com/0.4/public_link_json'

@app.task(base=Batches, flush_every=100, flush_interval=10)
def wot_api(requests):
sig = lambda url: url
        responses = wot_api_real(
            (sig(*request.args, **request.kwargs) for request in requests)
        )
        # use mark_as_done to manually return response data
        for response, request in zip(responses, requests):
app.backend.mark_as_done(request.id, response)


def wot_api_real(urls):
domains = [urlparse(url).netloc for url in urls]
response = requests.get(
wot_api_target,
params={'hosts': ('/').join(set(domains)) + '/'}
)
return [response.json()[domain] for domain in domains]

Using the API is done as follows::

>>> wot_api.delay('http://example.com')

.. note::

If you don't have an ``app`` instance then use the current app proxy
instead::

from celery import current_app
        current_app.backend.mark_as_done(request.id, response)

"""
from __future__ import absolute_import

from itertools import count

from celery.task import Task
from celery.five import Empty, Queue
from celery.utils.log import get_logger
from celery.worker.job import Request
from celery.utils import noop

__all__ = ['Batches']

logger = get_logger(__name__)


def consume_queue(queue):
"""Iterator yielding all immediately available items in a
:class:`Queue.Queue`.

The iterator stops as soon as the queue raises :exc:`Queue.Empty`.

*Examples*

>>> q = Queue()
>>> map(q.put, range(4))
>>> list(consume_queue(q))
[0, 1, 2, 3]
>>> list(consume_queue(q))
[]

"""
get = queue.get_nowait
while 1:
try:
yield get()
except Empty:
break


def apply_batches_task(task, args, loglevel, logfile):
task.push_request(loglevel=loglevel, logfile=logfile)
try:
result = task(*args)
except Exception as exc:
result = None
logger.error('Error: %r', exc, exc_info=True)
finally:
task.pop_request()
return result


class SimpleRequest(object):
"""Pickleable request."""

#: task id
id = None

#: task name
name = None

#: positional arguments
args = ()

#: keyword arguments
kwargs = {}

#: message delivery information.
delivery_info = None

#: worker node name
hostname = None

def __init__(self, id, name, args, kwargs, delivery_info, hostname):
self.id = id
self.name = name
self.args = args
self.kwargs = kwargs
self.delivery_info = delivery_info
self.hostname = hostname

@classmethod
def from_request(cls, request):
return cls(request.id, request.name, request.args,
request.kwargs, request.delivery_info, request.hostname)


class Batches(Task):
abstract = True

    #: Maximum number of messages in the buffer.
flush_every = 10

#: Timeout in seconds before buffer is flushed anyway.
flush_interval = 30

def __init__(self):
self._buffer = Queue()
self._count = count(1)
self._tref = None
self._pool = None

def run(self, requests):
raise NotImplementedError('must implement run(requests)')

def Strategy(self, task, app, consumer):
self._pool = consumer.pool
hostname = consumer.hostname
eventer = consumer.event_dispatcher
Req = Request
connection_errors = consumer.connection_errors
timer = consumer.timer
put_buffer = self._buffer.put
flush_buffer = self._do_flush

def task_message_handler(message, body, ack, reject, callbacks, **kw):
request = Req(body, on_ack=ack, app=app, hostname=hostname,
events=eventer, task=task,
connection_errors=connection_errors,
delivery_info=message.delivery_info)
put_buffer(request)

if self._tref is None: # first request starts flush timer.
self._tref = timer.call_repeatedly(
self.flush_interval, flush_buffer,
)

if not next(self._count) % self.flush_every:
flush_buffer()

return task_message_handler

def flush(self, requests):
return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
for r in requests], ))

def _do_flush(self):
logger.debug('Batches: Wake-up to flush buffer...')
requests = None
if self._buffer.qsize():
requests = list(consume_queue(self._buffer))
if requests:
logger.debug('Batches: Buffer complete: %s', len(requests))
self.flush(requests)
if not requests:
logger.debug('Batches: Canceling timer: Nothing in buffer.')
if self._tref:
self._tref.cancel() # cancel timer.
self._tref = None

def apply_buffer(self, requests, args=(), kwargs={}):
acks_late = [], []
[acks_late[r.task.acks_late].append(r) for r in requests]
assert requests and (acks_late[True] or acks_late[False])

def on_accepted(pid, time_accepted):
[req.acknowledge() for req in acks_late[False]]

def on_return(result):
[req.acknowledge() for req in acks_late[True]]

return self._pool.apply_async(
apply_batches_task,
(self, args, 0, None),
accept_callback=on_accepted,
callback=acks_late[True] and on_return or noop,
)
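
A compact sketch tying the pieces together, including the prefetch setting the warning at the top of this module asks for (the app name and broker URL are placeholders):

from celery import Celery
from celery.contrib.batches import Batches

app = Celery('proj', broker='amqp://')
app.conf.CELERYD_PREFETCH_MULTIPLIER = 0     # don't let prefetch cap the batch

@app.task(base=Batches, flush_every=100, flush_interval=10)
def count_click(requests):
    # `requests` is a list of SimpleRequest objects (see class above).
    print('flushing {0} clicks'.format(len(requests)))

# Producer side: each call only adds a message to the buffer.
# count_click.delay(url='http://example.com')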

+ 0  - 126  thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py

# -*- coding: utf-8 -*-
"""
celery.contrib.methods
======================

Task decorator that supports creating tasks out of methods.

Examples
--------

.. code-block:: python

from celery.contrib.methods import task

class X(object):

@task()
def add(self, x, y):
return x + y

or with any task decorator:

.. code-block:: python

from celery.contrib.methods import task_method

class X(object):

@app.task(filter=task_method)
def add(self, x, y):
return x + y

.. note::

The task must use the new Task base class (:class:`celery.Task`),
    and not the old base class using classmethods (``celery.task.Task``,
``celery.task.base.Task``).

This means that you have to use the task decorator from a Celery app
instance, and not the old-API:

.. code-block:: python


from celery import task # BAD
from celery.task import task # ALSO BAD

# GOOD:
app = Celery(...)

@app.task(filter=task_method)
def foo(self): pass

# ALSO GOOD:
from celery import current_app

@current_app.task(filter=task_method)
def foo(self): pass

# ALSO GOOD:
from celery import shared_task

@shared_task(filter=task_method)
def foo(self): pass

Caveats
-------

- Automatic naming won't be able to know what the class name is.

The name will still be module_name + task_name,
so two methods with the same name in the same module will collide
so that only one task can run:

.. code-block:: python

class A(object):

@task()
def add(self, x, y):
return x + y

class B(object):

@task()
def add(self, x, y):
return x + y

would have to be written as:

.. code-block:: python

class A(object):
@task(name='A.add')
def add(self, x, y):
return x + y

class B(object):
@task(name='B.add')
def add(self, x, y):
return x + y

"""

from __future__ import absolute_import

from celery import current_app

__all__ = ['task_method', 'task']


class task_method(object):

def __init__(self, task, *args, **kwargs):
self.task = task

def __get__(self, obj, type=None):
if obj is None:
return self.task
task = self.task.__class__()
task.__self__ = obj
return task


def task(*args, **kwargs):
return current_app.task(*args, **dict(kwargs, filter=task_method))
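
A sketch of how the descriptor above is used, with an explicit task name to avoid the naming-collision caveat described in the docstring (the class, app, and broker URL are hypothetical):

from celery import Celery
from celery.contrib.methods import task_method

app = Celery('proj', broker='amqp://')

class Calculator(object):

    @app.task(filter=task_method, name='Calculator.add')
    def add(self, x, y):
        return x + y

calc = Calculator()
calc.add(2, 2)             # __get__ binds the task to calc; runs locally -> 4
# calc.add.delay(2, 2)     # sends a task message once a worker is running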

+ 0  - 365  thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py

# -*- coding: utf-8 -*-
"""
celery.contrib.migrate
~~~~~~~~~~~~~~~~~~~~~~

Migration tools.

"""
from __future__ import absolute_import, print_function, unicode_literals

import socket

from functools import partial
from itertools import cycle, islice

from kombu import eventloop, Queue
from kombu.common import maybe_declare
from kombu.utils.encoding import ensure_bytes

from celery.app import app_or_default
from celery.five import string, string_t
from celery.utils import worker_direct

__all__ = ['StopFiltering', 'State', 'republish', 'migrate_task',
'migrate_tasks', 'move', 'task_id_eq', 'task_id_in',
'start_filter', 'move_task_by_id', 'move_by_idmap',
'move_by_taskmap', 'move_direct', 'move_direct_by_id']

MOVING_PROGRESS_FMT = """\
Moving task {state.filtered}/{state.strtotal}: \
{body[task]}[{body[id]}]\
"""


class StopFiltering(Exception):
pass


class State(object):
count = 0
filtered = 0
total_apx = 0

@property
def strtotal(self):
if not self.total_apx:
return '?'
return string(self.total_apx)

def __repr__(self):
if self.filtered:
return '^{0.filtered}'.format(self)
return '{0.count}/{0.strtotal}'.format(self)


def republish(producer, message, exchange=None, routing_key=None,
remove_props=['application_headers',
'content_type',
'content_encoding',
'headers']):
body = ensure_bytes(message.body) # use raw message body.
info, headers, props = (message.delivery_info,
message.headers, message.properties)
exchange = info['exchange'] if exchange is None else exchange
routing_key = info['routing_key'] if routing_key is None else routing_key
ctype, enc = message.content_type, message.content_encoding
# remove compression header, as this will be inserted again
# when the message is recompressed.
compression = headers.pop('compression', None)

for key in remove_props:
props.pop(key, None)

producer.publish(ensure_bytes(body), exchange=exchange,
routing_key=routing_key, compression=compression,
headers=headers, content_type=ctype,
content_encoding=enc, **props)


def migrate_task(producer, body_, message, queues=None):
info = message.delivery_info
queues = {} if queues is None else queues
republish(producer, message,
exchange=queues.get(info['exchange']),
routing_key=queues.get(info['routing_key']))


def filter_callback(callback, tasks):

def filtered(body, message):
if tasks and body['task'] not in tasks:
return

return callback(body, message)
return filtered


def migrate_tasks(source, dest, migrate=migrate_task, app=None,
queues=None, **kwargs):
app = app_or_default(app)
queues = prepare_queues(queues)
producer = app.amqp.TaskProducer(dest)
migrate = partial(migrate, producer, queues=queues)

def on_declare_queue(queue):
new_queue = queue(producer.channel)
new_queue.name = queues.get(queue.name, queue.name)
if new_queue.routing_key == queue.name:
new_queue.routing_key = queues.get(queue.name,
new_queue.routing_key)
if new_queue.exchange.name == queue.name:
new_queue.exchange.name = queues.get(queue.name, queue.name)
new_queue.declare()

return start_filter(app, source, migrate, queues=queues,
on_declare_queue=on_declare_queue, **kwargs)


def _maybe_queue(app, q):
if isinstance(q, string_t):
return app.amqp.queues[q]
return q


def move(predicate, connection=None, exchange=None, routing_key=None,
source=None, app=None, callback=None, limit=None, transform=None,
**kwargs):
"""Find tasks by filtering them and move the tasks to a new queue.

:param predicate: Filter function used to decide which messages
to move. Must accept the standard signature of ``(body, message)``
used by Kombu consumer callbacks. If the predicate wants the message
to be moved it must return either:

1) a tuple of ``(exchange, routing_key)``, or

2) a :class:`~kombu.entity.Queue` instance, or

3) any other true value which means the specified
``exchange`` and ``routing_key`` arguments will be used.

:keyword connection: Custom connection to use.
:keyword source: Optional list of source queues to use instead of the
default (which is the queues in :setting:`CELERY_QUEUES`).
This list can also contain new :class:`~kombu.entity.Queue` instances.
:keyword exchange: Default destination exchange.
:keyword routing_key: Default destination routing key.
:keyword limit: Limit number of messages to filter.
:keyword callback: Callback called after message moved,
with signature ``(state, body, message)``.
:keyword transform: Optional function to transform the return
value (destination) of the filter function.

Also supports the same keyword arguments as :func:`start_filter`.

To demonstrate, the :func:`move_task_by_id` operation can be implemented
like this:

.. code-block:: python

def is_wanted_task(body, message):
if body['id'] == wanted_id:
return Queue('foo', exchange=Exchange('foo'),
routing_key='foo')

move(is_wanted_task)

or with a transform:

.. code-block:: python

def transform(value):
if isinstance(value, string_t):
return Queue(value, Exchange(value), value)
return value

move(is_wanted_task, transform=transform)

The predicate may also return a tuple of ``(exchange, routing_key)``
to specify the destination to where the task should be moved,
or a :class:`~kombu.entity.Queue` instance.
Any other true value means that the task will be moved to the
default exchange/routing_key.

"""
app = app_or_default(app)
queues = [_maybe_queue(app, queue) for queue in source or []] or None
with app.connection_or_acquire(connection, pool=False) as conn:
producer = app.amqp.TaskProducer(conn)
state = State()

def on_task(body, message):
ret = predicate(body, message)
if ret:
if transform:
ret = transform(ret)
if isinstance(ret, Queue):
maybe_declare(ret, conn.default_channel)
ex, rk = ret.exchange.name, ret.routing_key
else:
ex, rk = expand_dest(ret, exchange, routing_key)
republish(producer, message,
exchange=ex, routing_key=rk)
message.ack()

state.filtered += 1
if callback:
callback(state, body, message)
if limit and state.filtered >= limit:
raise StopFiltering()

return start_filter(app, conn, on_task, consume_from=queues, **kwargs)


def expand_dest(ret, exchange, routing_key):
try:
ex, rk = ret
except (TypeError, ValueError):
ex, rk = exchange, routing_key
return ex, rk


def task_id_eq(task_id, body, message):
return body['id'] == task_id


def task_id_in(ids, body, message):
return body['id'] in ids


def prepare_queues(queues):
if isinstance(queues, string_t):
queues = queues.split(',')
if isinstance(queues, list):
queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
for q in queues)
if queues is None:
queues = {}
return queues


def start_filter(app, conn, filter, limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
state = state or State()
queues = prepare_queues(queues)
consume_from = [_maybe_queue(app, q)
for q in consume_from or list(queues)]
if isinstance(tasks, string_t):
tasks = set(tasks.split(','))
if tasks is None:
tasks = set([])

def update_state(body, message):
state.count += 1
if limit and state.count >= limit:
raise StopFiltering()

def ack_message(body, message):
message.ack()

consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept)

if tasks:
filter = filter_callback(filter, tasks)
update_state = filter_callback(update_state, tasks)
ack_message = filter_callback(ack_message, tasks)

consumer.register_callback(filter)
consumer.register_callback(update_state)
if ack_messages:
consumer.register_callback(ack_message)
if callback is not None:
callback = partial(callback, state)
if tasks:
callback = filter_callback(callback, tasks)
consumer.register_callback(callback)

# declare all queues on the new broker.
for queue in consumer.queues:
if queues and queue.name not in queues:
continue
if on_declare_queue is not None:
on_declare_queue(queue)
try:
_, mcount, _ = queue(consumer.channel).queue_declare(passive=True)
if mcount:
state.total_apx += mcount
except conn.channel_errors:
pass

# start migrating messages.
with consumer:
try:
for _ in eventloop(conn, # pragma: no cover
timeout=timeout, ignore_timeouts=forever):
pass
except socket.timeout:
pass
except StopFiltering:
pass
return state


def move_task_by_id(task_id, dest, **kwargs):
"""Find a task by id and move it to another queue.

:param task_id: Id of task to move.
:param dest: Destination queue.

Also supports the same keyword arguments as :func:`move`.

"""
return move_by_idmap({task_id: dest}, **kwargs)


def move_by_idmap(map, **kwargs):
"""Moves tasks by matching from a ``task_id: queue`` mapping,
where ``queue`` is a queue to move the task to.

Example::

>>> move_by_idmap({
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
... queues=['hipri'])

"""
def task_id_in_map(body, message):
return map.get(body['id'])

# adding the limit means that we don't have to consume any more
# when we've found everything.
return move(task_id_in_map, limit=len(map), **kwargs)


def move_by_taskmap(map, **kwargs):
"""Moves tasks by matching from a ``task_name: queue`` mapping,
where ``queue`` is the queue to move the task to.

Example::

>>> move_by_taskmap({
... 'tasks.add': Queue('name'),
... 'tasks.mul': Queue('name'),
... })

"""

def task_name_in_map(body, message):
return map.get(body['task']) # <- name of task

return move(task_name_in_map, **kwargs)


def filter_status(state, body, message, **kwargs):
print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))


move_direct = partial(move, transform=worker_direct)
move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
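As a concrete illustration of the helpers defined above, a hedged sketch of moving one task by id and of migrating a whole broker might look like the following; the broker URLs, queue names and task id are assumptions.

# Hedged usage sketch for celery.contrib.migrate (URLs, names and ids are assumptions).
from kombu import Connection, Exchange, Queue
from celery.contrib.migrate import filter_status, migrate_tasks, move_task_by_id

# Move a single task to a dedicated queue, printing progress via filter_status.
slow_queue = Queue('slow', Exchange('slow'), routing_key='slow')
move_task_by_id('5bee6e82-f4ac-468e-bd3d-13e8600250bc', slow_queue,
                callback=filter_status)

# Drain all task messages from one broker into another.
with Connection('amqp://old-broker//') as source:
    with Connection('amqp://new-broker//') as destination:
        state = migrate_tasks(source, destination)
        print('Migrated {0.count} messages'.format(state))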

+ 0
- 183
thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py

# -*- coding: utf-8 -*-
"""
celery.contrib.rdb
==================

Remote debugger for Celery tasks running in multiprocessing pool workers.
Inspired by http://snippets.dzone.com/posts/show/7248

**Usage**

.. code-block:: python

from celery.contrib import rdb
from celery import task

@task()
def add(x, y):
result = x + y
rdb.set_trace()
return result


**Environment Variables**

.. envvar:: CELERY_RDB_HOST

Hostname to bind to. Default is '127.0.0.1', which means the socket
will only be accessible from the local host.

.. envvar:: CELERY_RDB_PORT

Base port to bind to. Default is 6899.
The debugger will try to find an available port starting from the
base port. The selected port will be logged by the worker.

"""
from __future__ import absolute_import, print_function

import errno
import os
import socket
import sys

from pdb import Pdb

from billiard import current_process

from celery.five import range

__all__ = ['CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'default_port',
'Rdb', 'debugger', 'set_trace']

default_port = 6899

CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1'
CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port)

#: Holds the currently active debugger.
_current = [None]

_frame = getattr(sys, '_getframe')

NO_AVAILABLE_PORT = """\
{self.ident}: Couldn't find an available port.

Please specify one using the CELERY_RDB_PORT environment variable.
"""

BANNER = """\
{self.ident}: Please telnet into {self.host} {self.port}.

Type `exit` in session to continue.

{self.ident}: Waiting for client...
"""

SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.'
SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'


class Rdb(Pdb):
me = 'Remote Debugger'
_prev_outs = None
_sock = None

def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
port_search_limit=100, port_skew=+0, out=sys.stdout):
self.active = True
self.out = out

self._prev_handles = sys.stdin, sys.stdout

self._sock, this_port = self.get_avail_port(
host, port, port_search_limit, port_skew,
)
self._sock.setblocking(1)
self._sock.listen(1)
self.ident = '{0}:{1}'.format(self.me, this_port)
self.host = host
self.port = this_port
self.say(BANNER.format(self=self))

self._client, address = self._sock.accept()
self._client.setblocking(1)
self.remote_addr = ':'.join(str(v) for v in address)
self.say(SESSION_STARTED.format(self=self))
self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
Pdb.__init__(self, completekey='tab',
stdin=self._handle, stdout=self._handle)

def get_avail_port(self, host, port, search_limit=100, skew=+0):
try:
_, skew = current_process().name.split('-')
skew = int(skew)
except ValueError:
pass
this_port = None
for i in range(search_limit):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
this_port = port + skew + i
try:
_sock.bind((host, this_port))
except socket.error as exc:
if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
continue
raise
else:
return _sock, this_port
else:
raise Exception(NO_AVAILABLE_PORT.format(self=self))

def say(self, m):
print(m, file=self.out)

def __enter__(self):
return self

def __exit__(self, *exc_info):
self._close_session()

def _close_session(self):
self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
if self.active:
if self._handle is not None:
self._handle.close()
if self._client is not None:
self._client.close()
if self._sock is not None:
self._sock.close()
self.active = False
self.say(SESSION_ENDED.format(self=self))

def do_continue(self, arg):
self._close_session()
self.set_continue()
return 1
do_c = do_cont = do_continue

def do_quit(self, arg):
self._close_session()
self.set_quit()
return 1
do_q = do_exit = do_quit

def set_quit(self):
# this raises a BdbQuit exception that we are unable to catch.
sys.settrace(None)


def debugger():
"""Return the current debugger instance (if any),
or creates a new one."""
rdb = _current[0]
if rdb is None or not rdb.active:
rdb = _current[0] = Rdb()
return rdb


def set_trace(frame=None):
"""Set breakpoint at current location, or a specified frame"""
if frame is None:
frame = _frame().f_back
return debugger().set_trace(frame)
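Tying the environment variables and set_trace() call above together, a hedged sketch of debugging a task remotely could look like this; the task body and the port are assumptions, and the worker log will print the actual port it bound to.

# Hedged usage sketch for celery.contrib.rdb (task body and port are assumptions).
import os
os.environ.setdefault('CELERY_RDB_PORT', '6900')   # base port to search from

from celery import shared_task
from celery.contrib import rdb

@shared_task
def divide(x, y):
    rdb.set_trace()   # the worker blocks here and prints "Please telnet into ..."
    return x / y

# From another shell, attach to the advertised port, e.g.:
#   telnet localhost 6900
# and type `exit` (or `c`) in the session to let the task continue.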

+ 0
- 76
thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py

# -*- coding: utf-8 -*-
"""
celery.contrib.sphinx
=====================

Sphinx documentation plugin

**Usage**

Add the extension to your :file:`docs/conf.py` configuration module:

.. code-block:: python

extensions = (...,
'celery.contrib.sphinx')

If you would like to change the prefix for tasks in reference documentation
then you can change the ``celery_task_prefix`` configuration value:

.. code-block:: python

celery_task_prefix = '(task)' # < default


With the extension installed, `autodoc` will automatically find
task-decorated objects and generate the correct documentation for them
(as well as add a ``(task)`` prefix), and you can also refer to the tasks
using `:task:proj.tasks.add` syntax.

Use ``.. autotask::`` to manually document a task.

"""
from __future__ import absolute_import

try:
from inspect import formatargspec, getfullargspec as getargspec
except ImportError: # Py2
from inspect import formatargspec, getargspec # noqa

from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter

from celery.app.task import BaseTask


class TaskDocumenter(FunctionDocumenter):
objtype = 'task'
member_order = 11

@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, BaseTask) and getattr(member, '__wrapped__')

def format_args(self):
wrapped = getattr(self.object, '__wrapped__')
if wrapped is not None:
argspec = getargspec(wrapped)
fmt = formatargspec(*argspec)
fmt = fmt.replace('\\', '\\\\')
return fmt
return ''

def document_members(self, all_members=False):
pass


class TaskDirective(PyModulelevel):

def get_signature_prefix(self, sig):
return self.env.config.celery_task_prefix


def setup(app):
app.add_autodocumenter(TaskDocumenter)
app.domains['py'].directives['task'] = TaskDirective
app.add_config_value('celery_task_prefix', '(task)', True)
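Putting the configuration snippets from the docstring together, a hedged docs/conf.py fragment and a manual .. autotask:: directive might look like this; the project and module names are assumptions.

# Hedged docs/conf.py fragment (project and module names are assumptions).
extensions = [
    'sphinx.ext.autodoc',
    'celery.contrib.sphinx',
]
celery_task_prefix = '(task)'   # prefix shown in front of task signatures

# In an .rst source file a task can then be documented explicitly with:
#
#   .. autotask:: proj.tasks.add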

+ 0
- 671
thesisenv/lib/python3.6/site-packages/celery/datastructures.py

# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~

Custom types and data structures.

"""
from __future__ import absolute_import, print_function, unicode_literals

import sys
import time

from collections import defaultdict, Mapping, MutableMapping, MutableSet
from heapq import heapify, heappush, heappop
from functools import partial
from itertools import chain

from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.encoding import safe_str
from kombu.utils.limits import TokenBucket # noqa

from celery.five import items
from celery.utils.functional import LRUCache, first, uniq # noqa

try:
from django.utils.functional import LazyObject, LazySettings
except ImportError:
class LazyObject(object): # noqa
pass
LazySettings = LazyObject # noqa

DOT_HEAD = """
{IN}{type} {id} {{
{INp}graph [{attrs}]
"""
DOT_ATTR = '{name}={value}'
DOT_NODE = '{INp}"{0}" [{attrs}]'
DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]'
DOT_ATTRSEP = ', '
DOT_DIRS = {'graph': '--', 'digraph': '->'}
DOT_TAIL = '{IN}}}'

__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph',
'AttributeDictMixin', 'AttributeDict', 'DictAttribute',
'ConfigurationView', 'LimitedSet']


def force_mapping(m):
if isinstance(m, (LazyObject, LazySettings)):
m = m._wrapped
return DictAttribute(m) if not isinstance(m, Mapping) else m


class GraphFormatter(object):
_attr = DOT_ATTR.strip()
_node = DOT_NODE.strip()
_edge = DOT_EDGE.strip()
_head = DOT_HEAD.strip()
_tail = DOT_TAIL.strip()
_attrsep = DOT_ATTRSEP
_dirs = dict(DOT_DIRS)

scheme = {
'shape': 'box',
'arrowhead': 'vee',
'style': 'filled',
'fontname': 'HelveticaNeue',
}
edge_scheme = {
'color': 'darkseagreen4',
'arrowcolor': 'black',
'arrowsize': 0.7,
}
node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'}
term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'}
graph_scheme = {'bgcolor': 'mintcream'}

def __init__(self, root=None, type=None, id=None,
indent=0, inw=' ' * 4, **scheme):
self.id = id or 'dependencies'
self.root = root
self.type = type or 'digraph'
self.direction = self._dirs[self.type]
self.IN = inw * (indent or 0)
self.INp = self.IN + inw
self.scheme = dict(self.scheme, **scheme)
self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root))

def attr(self, name, value):
value = '"{0}"'.format(value)
return self.FMT(self._attr, name=name, value=value)

def attrs(self, d, scheme=None):
d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d)
return self._attrsep.join(
safe_str(self.attr(k, v)) for k, v in items(d)
)

def head(self, **attrs):
return self.FMT(
self._head, id=self.id, type=self.type,
attrs=self.attrs(attrs, self.graph_scheme),
)

def tail(self):
return self.FMT(self._tail)

def label(self, obj):
return obj

def node(self, obj, **attrs):
return self.draw_node(obj, self.node_scheme, attrs)

def terminal_node(self, obj, **attrs):
return self.draw_node(obj, self.term_scheme, attrs)

def edge(self, a, b, **attrs):
return self.draw_edge(a, b, **attrs)

def _enc(self, s):
return s.encode('utf-8', 'ignore')

def FMT(self, fmt, *args, **kwargs):
return self._enc(fmt.format(
*args, **dict(kwargs, IN=self.IN, INp=self.INp)
))

def draw_edge(self, a, b, scheme=None, attrs=None):
return self.FMT(
self._edge, self.label(a), self.label(b),
dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme),
)

def draw_node(self, obj, scheme=None, attrs=None):
return self.FMT(
self._node, self.label(obj), attrs=self.attrs(attrs, scheme),
)


class CycleError(Exception):
"""A cycle was detected in an acyclic graph."""


class DependencyGraph(object):
"""A directed acyclic graph of objects and their dependencies.

Supports a robust topological sort
to detect the order in which they must be handled.

Takes an optional iterator of ``(obj, dependencies)``
tuples to build the graph from.

.. warning::

Does not support cycle detection.

"""

def __init__(self, it=None, formatter=None):
self.formatter = formatter or GraphFormatter()
self.adjacent = {}
if it is not None:
self.update(it)

def add_arc(self, obj):
"""Add an object to the graph."""
self.adjacent.setdefault(obj, [])

def add_edge(self, A, B):
"""Add an edge from object ``A`` to object ``B``
(``A`` depends on ``B``)."""
self[A].append(B)

def connect(self, graph):
"""Add nodes from another graph."""
self.adjacent.update(graph.adjacent)

def topsort(self):
"""Sort the graph topologically.

:returns: a list of objects in the order
in which they must be handled.

"""
graph = DependencyGraph()
components = self._tarjan72()

NC = dict((node, component)
for component in components
for node in component)
for component in components:
graph.add_arc(component)
for node in self:
node_c = NC[node]
for successor in self[node]:
successor_c = NC[successor]
if node_c != successor_c:
graph.add_edge(node_c, successor_c)
return [t[0] for t in graph._khan62()]

def valency_of(self, obj):
"""Return the valency (degree) of a vertex in the graph."""
try:
l = [len(self[obj])]
except KeyError:
return 0
for node in self[obj]:
l.append(self.valency_of(node))
return sum(l)

def update(self, it):
"""Update the graph with data from a list
of ``(obj, dependencies)`` tuples."""
tups = list(it)
for obj, _ in tups:
self.add_arc(obj)
for obj, deps in tups:
for dep in deps:
self.add_edge(obj, dep)

def edges(self):
"""Return generator that yields for all edges in the graph."""
return (obj for obj, adj in items(self) if adj)

def _khan62(self):
"""Khans simple topological sort algorithm from '62

See http://en.wikipedia.org/wiki/Topological_sorting

"""
count = defaultdict(lambda: 0)
result = []

for node in self:
for successor in self[node]:
count[successor] += 1
ready = [node for node in self if not count[node]]

while ready:
node = ready.pop()
result.append(node)

for successor in self[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
result.reverse()
return result

def _tarjan72(self):
"""Tarjan's algorithm to find strongly connected components.

See http://bit.ly/vIMv3h.

"""
result, stack, low = [], [], {}

def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)

for successor in self[node]:
visit(successor)
low[node] = min(low[node], low[successor])

if num == low[node]:
component = tuple(stack[stack_pos:])
stack[stack_pos:] = []
result.append(component)
for item in component:
low[item] = len(self)

for node in self:
visit(node)

return result

def to_dot(self, fh, formatter=None):
"""Convert the graph to DOT format.

:param fh: A file, or a file-like object to write the graph to.

"""
seen = set()
draw = formatter or self.formatter
P = partial(print, file=fh)

def if_not_seen(fun, obj):
if draw.label(obj) not in seen:
P(fun(obj))
seen.add(draw.label(obj))

P(draw.head())
for obj, adjacent in items(self):
if not adjacent:
if_not_seen(draw.terminal_node, obj)
for req in adjacent:
if_not_seen(draw.node, obj)
P(draw.edge(obj, req))
P(draw.tail())

def format(self, obj):
return self.formatter(obj) if self.formatter else obj

def __iter__(self):
return iter(self.adjacent)

def __getitem__(self, node):
return self.adjacent[node]

def __len__(self):
return len(self.adjacent)

def __contains__(self, obj):
return obj in self.adjacent

def _iterate_items(self):
return items(self.adjacent)
items = iteritems = _iterate_items

def __repr__(self):
return '\n'.join(self.repr_node(N) for N in self)

def repr_node(self, obj, level=1, fmt='{0}({1})'):
output = [fmt.format(obj, self.valency_of(obj))]
if obj in self:
for other in self[obj]:
d = fmt.format(other, self.valency_of(other))
output.append(' ' * level + d)
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
return '\n'.join(output)


class AttributeDictMixin(object):
"""Augment classes with a Mapping interface by adding attribute access.

I.e. `d.key -> d[key]`.

"""

def __getattr__(self, k):
"""`d.key -> d[key]`"""
try:
return self[k]
except KeyError:
raise AttributeError(
'{0!r} object has no attribute {1!r}'.format(
type(self).__name__, k))

def __setattr__(self, key, value):
"""`d[key] = value -> d.key = value`"""
self[key] = value


class AttributeDict(dict, AttributeDictMixin):
"""Dict subclass with attribute access."""
pass


class DictAttribute(object):
"""Dict interface to attributes.

`obj[k] -> obj.k`
`obj[k] = val -> obj.k = val`

"""
obj = None

def __init__(self, obj):
object.__setattr__(self, 'obj', obj)

def __getattr__(self, key):
return getattr(self.obj, key)

def __setattr__(self, key, value):
return setattr(self.obj, key, value)

def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default

def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default

def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError:
raise KeyError(key)

def __setitem__(self, key, value):
setattr(self.obj, key, value)

def __contains__(self, key):
return hasattr(self.obj, key)

def _iterate_keys(self):
return iter(dir(self.obj))
iterkeys = _iterate_keys

def __iter__(self):
return self._iterate_keys()

def _iterate_items(self):
for key in self._iterate_keys():
yield key, getattr(self.obj, key)
iteritems = _iterate_items

def _iterate_values(self):
for key in self._iterate_keys():
yield getattr(self.obj, key)
itervalues = _iterate_values

if sys.version_info[0] == 3: # pragma: no cover
items = _iterate_items
keys = _iterate_keys
values = _iterate_values
else:

def keys(self):
return list(self)

def items(self):
return list(self._iterate_items())

def values(self):
return list(self._iterate_values())
MutableMapping.register(DictAttribute)


class ConfigurationView(AttributeDictMixin):
"""A view over an applications configuration dicts.

Custom (but older) version of :class:`collections.ChainMap`.

If the key does not exist in ``changes``, the ``defaults`` dicts
are consulted.

:param changes: Dict containing changes to the configuration.
:param defaults: List of dicts containing the default configuration.

"""
changes = None
defaults = None
_order = None

def __init__(self, changes, defaults):
self.__dict__.update(changes=changes, defaults=defaults,
_order=[changes] + defaults)

def add_defaults(self, d):
d = force_mapping(d)
self.defaults.insert(0, d)
self._order.insert(1, d)

def __getitem__(self, key):
for d in self._order:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)

def __setitem__(self, key, value):
self.changes[key] = value

def first(self, *keys):
return first(None, (self.get(key) for key in keys))

def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default

def clear(self):
"""Remove all changes, but keep defaults."""
self.changes.clear()

def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default

def update(self, *args, **kwargs):
return self.changes.update(*args, **kwargs)

def __contains__(self, key):
return any(key in m for m in self._order)

def __bool__(self):
return any(self._order)
__nonzero__ = __bool__ # Py2

def __repr__(self):
return repr(dict(items(self)))

def __iter__(self):
return self._iterate_keys()

def __len__(self):
# The logic for iterating keys includes uniq(),
# so to be safe we count by explicitly iterating
return len(set().union(*self._order))

def _iter(self, op):
# defaults must be first in the stream, so values in
# changes takes precedence.
return chain(*[op(d) for d in reversed(self._order)])

def _iterate_keys(self):
return uniq(self._iter(lambda d: d))
iterkeys = _iterate_keys

def _iterate_items(self):
return ((key, self[key]) for key in self)
iteritems = _iterate_items

def _iterate_values(self):
return (self[key] for key in self)
itervalues = _iterate_values

if sys.version_info[0] == 3: # pragma: no cover
keys = _iterate_keys
items = _iterate_items
values = _iterate_values

else: # noqa
def keys(self):
return list(self._iterate_keys())

def items(self):
return list(self._iterate_items())

def values(self):
return list(self._iterate_values())

MutableMapping.register(ConfigurationView)


class LimitedSet(object):
"""Kind-of Set with limitations.

Good for when you need to test for membership (`a in set`),
but the set should not grow unbounded.

:keyword maxlen: Maximum number of members before we start
evicting expired members.
:keyword expires: Time in seconds, before a membership expires.

"""

def __init__(self, maxlen=None, expires=None, data=None, heap=None):
# heap is ignored
self.maxlen = maxlen
self.expires = expires
self._data = {} if data is None else data
self._heap = []

# make shortcuts
self.__len__ = self._heap.__len__
self.__contains__ = self._data.__contains__

self._refresh_heap()

def _refresh_heap(self):
self._heap[:] = [(t, key) for key, t in items(self._data)]
heapify(self._heap)

def add(self, key, now=time.time, heappush=heappush):
"""Add a new member."""
# offset is there to modify the length of the list,
# this way we can expire an item before inserting the value,
# and it will end up in the correct order.
self.purge(1, offset=1)
inserted = now()
self._data[key] = inserted
heappush(self._heap, (inserted, key))

def clear(self):
"""Remove all members"""
self._data.clear()
self._heap[:] = []

def discard(self, value):
"""Remove membership by finding value."""
try:
itime = self._data[value]
except KeyError:
return
try:
self._heap.remove((itime, value))
except ValueError:
pass
self._data.pop(value, None)
pop_value = discard # XXX compat

def purge(self, limit=None, offset=0, now=time.time):
"""Purge expired items."""
H, maxlen = self._heap, self.maxlen
if not maxlen:
return

# If the data/heap gets corrupted and limit is None
# this will go into an infinite loop, so limit must
# have a value to guard the loop.
limit = len(self) + offset if limit is None else limit

i = 0
while len(self) + offset > maxlen:
if i >= limit:
break
try:
item = heappop(H)
except IndexError:
break
if self.expires:
if now() < item[0] + self.expires:
heappush(H, item)
break
try:
self._data.pop(item[1])
except KeyError: # out of sync with heap
pass
i += 1

def update(self, other):
if isinstance(other, LimitedSet):
self._data.update(other._data)
self._refresh_heap()
else:
for obj in other:
self.add(obj)

def as_dict(self):
return self._data

def __eq__(self, other):
return self._heap == other._heap

def __ne__(self, other):
return not self.__eq__(other)

def __repr__(self):
return 'LimitedSet({0})'.format(len(self))

def __iter__(self):
return (item[1] for item in self._heap)

def __len__(self):
return len(self._heap)

def __contains__(self, key):
return key in self._data

def __reduce__(self):
return self.__class__, (self.maxlen, self.expires, self._data)
MutableSet.register(LimitedSet)
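A short, hedged sketch of two of the structures above, DependencyGraph and LimitedSet, is given below; the node names and member values are made up.

# Hedged sketch of DependencyGraph and LimitedSet (all values are made up).
from celery.datastructures import DependencyGraph, LimitedSet

# 'app' depends on 'db' and 'cache'; 'db' depends on 'disk'.
graph = DependencyGraph([
    ('app', ['db', 'cache']),
    ('db', ['disk']),
    ('cache', []),
    ('disk', []),
])
print(graph.topsort())   # objects in the order in which they must be handled

# LimitedSet: membership testing with a bounded number of members.
seen = LimitedSet(maxlen=3)            # no expiry, so the oldest member is evicted
for task_id in ['a', 'b', 'c', 'd']:
    seen.add(task_id)
print('a' in seen, 'd' in seen)        # -> False True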

+ 0
- 408
thesisenv/lib/python3.6/site-packages/celery/events/__init__.py

# -*- coding: utf-8 -*-
"""
celery.events
~~~~~~~~~~~~~

Events is a stream of messages sent for certain actions occurring
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
is enabled), used for monitoring purposes.

"""
from __future__ import absolute_import

import os
import time
import threading
import warnings

from collections import deque
from contextlib import contextmanager
from copy import copy
from operator import itemgetter

from kombu import Exchange, Queue, Producer
from kombu.connection import maybe_channel
from kombu.mixins import ConsumerMixin
from kombu.utils import cached_property

from celery.app import app_or_default
from celery.utils import anon_nodename, uuid
from celery.utils.functional import dictfilter
from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms

__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver']

event_exchange = Exchange('celeryev', type='topic')

_TZGETTER = itemgetter('utcoffset', 'timestamp')

W_YAJL = """
anyjson is currently using the yajl library.
This json implementation is broken: it severely truncates floats,
so timestamps will not work.

Please uninstall yajl or force anyjson to use a different library.
"""

CLIENT_CLOCK_SKEW = -1


def get_exchange(conn):
ex = copy(event_exchange)
if conn.transport.driver_type == 'redis':
# quick hack for Issue #436
ex.type = 'fanout'
return ex


def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
"""Create an event.

An event is a dictionary; the only required field is ``type``.
A ``timestamp`` field will be set to the current time if not provided.

"""
event = __dict__(_fields, **fields) if _fields else fields
if 'timestamp' not in event:
event.update(timestamp=__now__(), type=type)
else:
event['type'] = type
return event


def group_from(type):
"""Get the group part of an event type name.

E.g.::

>>> group_from('task-sent')
'task'

>>> group_from('custom-my-event')
'custom'

"""
return type.split('-', 1)[0]


class EventDispatcher(object):
"""Dispatches event messages.

:param connection: Connection to the broker.

:keyword hostname: Hostname to identify ourselves as,
by default uses the hostname returned by
:func:`~celery.utils.anon_nodename`.

:keyword groups: List of groups to send events for. :meth:`send` will
ignore send requests to groups not in this list.
If this is :const:`None`, all events will be sent. Example groups
include ``"task"`` and ``"worker"``.

:keyword enabled: Set to :const:`False` to not actually publish any events,
making :meth:`send` a noop operation.

:keyword channel: Can be used instead of `connection` to specify
an exact channel to use when sending events.

:keyword buffer_while_offline: If enabled events will be buffered
while the connection is down. :meth:`flush` must be called
as soon as the connection is re-established.

You need to :meth:`close` this after use.

"""
DISABLED_TRANSPORTS = set(['sql'])

app = None

# set of callbacks to be called when :meth:`enabled`.
on_enabled = None

# set of callbacks to be called when :meth:`disabled`.
on_disabled = None

def __init__(self, connection=None, hostname=None, enabled=True,
channel=None, buffer_while_offline=True, app=None,
serializer=None, groups=None):
self.app = app_or_default(app or self.app)
self.connection = connection
self.channel = channel
self.hostname = hostname or anon_nodename()
self.buffer_while_offline = buffer_while_offline
self.mutex = threading.Lock()
self.producer = None
self._outbound_buffer = deque()
self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
self.on_enabled = set()
self.on_disabled = set()
self.groups = set(groups or [])
self.tzoffset = [-time.timezone, -time.altzone]
self.clock = self.app.clock
if not connection and channel:
self.connection = channel.connection.client
self.enabled = enabled
conninfo = self.connection or self.app.connection()
self.exchange = get_exchange(conninfo)
if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
self.enabled = False
if self.enabled:
self.enable()
self.headers = {'hostname': self.hostname}
self.pid = os.getpid()
self.warn_if_yajl()

def warn_if_yajl(self):
import anyjson
if anyjson.implementation.name == 'yajl':
warnings.warn(UserWarning(W_YAJL))

def __enter__(self):
return self

def __exit__(self, *exc_info):
self.close()

def enable(self):
self.producer = Producer(self.channel or self.connection,
exchange=self.exchange,
serializer=self.serializer)
self.enabled = True
for callback in self.on_enabled:
callback()

def disable(self):
if self.enabled:
self.enabled = False
self.close()
for callback in self.on_disabled:
callback()

def publish(self, type, fields, producer, retry=False,
retry_policy=None, blind=False, utcoffset=utcoffset,
Event=Event):
"""Publish event using a custom :class:`~kombu.Producer`
instance.

:param type: Event type name, with group separated by dash (`-`).
:param fields: Dictionary of event fields, must be json serializable.
:param producer: :class:`~kombu.Producer` instance to use,
only the ``publish`` method will be called.
:keyword retry: Retry in the event of connection failure.
:keyword retry_policy: Dict of custom retry policy, see
:meth:`~kombu.Connection.ensure`.
:keyword blind: Don't set logical clock value (also do not forward
the internal logical clock).
:keyword Event: Event type used to create event,
defaults to :func:`Event`.
:keyword utcoffset: Function returning the current utcoffset in hours.

"""

with self.mutex:
clock = None if blind else self.clock.forward()
event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
pid=self.pid, clock=clock, **fields)
exchange = self.exchange
producer.publish(
event,
routing_key=type.replace('-', '.'),
exchange=exchange.name,
retry=retry,
retry_policy=retry_policy,
declare=[exchange],
serializer=self.serializer,
headers=self.headers,
)

def send(self, type, blind=False, **fields):
"""Send event.

:param type: Event type name, with group separated by dash (`-`).
:keyword retry: Retry in the event of connection failure.
:keyword retry_policy: Dict of custom retry policy, see
:meth:`~kombu.Connection.ensure`.
:keyword blind: Don't set logical clock value (also do not forward
the internal logical clock).
:keyword Event: Event type used to create event,
defaults to :func:`Event`.
:keyword utcoffset: Function returning the current utcoffset in hours.
:keyword \*\*fields: Event fields, must be json serializable.

"""
if self.enabled:
groups = self.groups
if groups and group_from(type) not in groups:
return
try:
self.publish(type, fields, self.producer, blind)
except Exception as exc:
if not self.buffer_while_offline:
raise
self._outbound_buffer.append((type, fields, exc))

def flush(self):
"""Flushes the outbound buffer."""
while self._outbound_buffer:
try:
type, fields, _ = self._outbound_buffer.popleft()
except IndexError:
return
self.send(type, **fields)

def extend_buffer(self, other):
"""Copies the outbound buffer of another instance."""
self._outbound_buffer.extend(other._outbound_buffer)

def close(self):
"""Close the event dispatcher."""
self.mutex.locked() and self.mutex.release()
self.producer = None

def _get_publisher(self):
return self.producer

def _set_publisher(self, producer):
self.producer = producer
publisher = property(_get_publisher, _set_publisher) # XXX compat


class EventReceiver(ConsumerMixin):
"""Capture events.

:param connection: Connection to the broker.
:keyword handlers: Event handlers.

:attr:`handlers` is a dict of event types and their handlers;
the special handler `"*"` captures all events that don't have a
specific handler.

"""
app = None

def __init__(self, channel, handlers=None, routing_key='#',
node_id=None, app=None, queue_prefix='celeryev',
accept=None):
self.app = app_or_default(app or self.app)
self.channel = maybe_channel(channel)
self.handlers = {} if handlers is None else handlers
self.routing_key = routing_key
self.node_id = node_id or uuid()
self.queue_prefix = queue_prefix
self.exchange = get_exchange(self.connection or self.app.connection())
self.queue = Queue('.'.join([self.queue_prefix, self.node_id]),
exchange=self.exchange,
routing_key=self.routing_key,
auto_delete=True,
durable=False,
queue_arguments=self._get_queue_arguments())
self.clock = self.app.clock
self.adjust_clock = self.clock.adjust
self.forward_clock = self.clock.forward
if accept is None:
accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
self.accept = accept

def _get_queue_arguments(self):
conf = self.app.conf
return dictfilter({
'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL),
'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES),
})

def process(self, type, event):
"""Process the received event by dispatching it to the appropriate
handler."""
handler = self.handlers.get(type) or self.handlers.get('*')
handler and handler(event)

def get_consumers(self, Consumer, channel):
return [Consumer(queues=[self.queue],
callbacks=[self._receive], no_ack=True,
accept=self.accept)]

def on_consume_ready(self, connection, channel, consumers,
wakeup=True, **kwargs):
if wakeup:
self.wakeup_workers(channel=channel)

def itercapture(self, limit=None, timeout=None, wakeup=True):
return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)

def capture(self, limit=None, timeout=None, wakeup=True):
"""Open up a consumer capturing events.

This has to run in the main process, and it will never stop
unless :attr:`EventDispatcher.should_stop` is set to True, or
forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.

"""
return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))

def wakeup_workers(self, channel=None):
self.app.control.broadcast('heartbeat',
connection=self.connection,
channel=channel)

def event_from_message(self, body, localize=True,
now=time.time, tzfields=_TZGETTER,
adjust_timestamp=adjust_timestamp,
CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
type = body['type']
if type == 'task-sent':
# clients never sync so cannot use their clock value
_c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
self.adjust_clock(_c)
else:
try:
clock = body['clock']
except KeyError:
body['clock'] = self.forward_clock()
else:
self.adjust_clock(clock)

if localize:
try:
offset, timestamp = tzfields(body)
except KeyError:
pass
else:
body['timestamp'] = adjust_timestamp(timestamp, offset)
body['local_received'] = now()
return type, body

def _receive(self, body, message):
self.process(*self.event_from_message(body))

@property
def connection(self):
return self.channel.connection.client if self.channel else None


class Events(object):

def __init__(self, app=None):
self.app = app

@cached_property
def Receiver(self):
return self.app.subclass_with_self(EventReceiver,
reverse='events.Receiver')

@cached_property
def Dispatcher(self):
return self.app.subclass_with_self(EventDispatcher,
reverse='events.Dispatcher')

@cached_property
def State(self):
return self.app.subclass_with_self('celery.events.state:State',
reverse='events.State')

@contextmanager
def default_dispatcher(self, hostname=None, enabled=True,
buffer_while_offline=False):
with self.app.amqp.producer_pool.acquire(block=True) as prod:
with self.Dispatcher(prod.connection, hostname, enabled,
prod.channel, buffer_while_offline) as d:
yield d
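To connect the dispatcher and receiver described above, a hedged sketch of sending a custom event and consuming events with the same app might look like this; the event name, handler and broker URL are assumptions.

# Hedged usage sketch for celery.events (event name, handler and broker URL are assumptions).
from celery import Celery

app = Celery('proj', broker='amqp://')

# Sending a custom event from client code:
with app.events.default_dispatcher() as dispatcher:
    dispatcher.send('custom-my-event', payload='hello')

# Receiving events (blocks until the limit is reached or it is interrupted):
def on_event(event):
    print('{type} from {hostname}'.format(**event))

with app.connection() as connection:
    receiver = app.events.Receiver(connection, handlers={'*': on_event})
    receiver.capture(limit=10)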

+ 0
- 544
thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py

# -*- coding: utf-8 -*-
"""
celery.events.cursesmon
~~~~~~~~~~~~~~~~~~~~~~~

Graphical monitor of Celery events using curses.

"""
from __future__ import absolute_import, print_function

import curses
import sys
import threading

from datetime import datetime
from itertools import count
from textwrap import wrap
from time import time
from math import ceil

from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.five import items, values
from celery.utils.text import abbr, abbrtask

__all__ = ['CursesMonitor', 'evtop']

BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16

# this module is considered experimental
# we don't care about coverage.

STATUS_SCREEN = """\
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
"""


class CursesMonitor(object): # pragma: no cover
keymap = {}
win = None
screen_width = None
screen_delay = 10
selected_task = None
selected_position = 0
selected_str = 'Selected: '
foreground = curses.COLOR_BLACK
background = curses.COLOR_WHITE
online_str = 'Workers online: '
help_title = 'Keys: '
help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit')
greet = 'celery events {0}'.format(VERSION_BANNER)
info_str = 'Info: '

def __init__(self, state, app, keymap=None):
self.app = app
self.keymap = keymap or self.keymap
self.state = state
default_keymap = {'J': self.move_selection_down,
'K': self.move_selection_up,
'C': self.revoke_selection,
'T': self.selection_traceback,
'R': self.selection_result,
'I': self.selection_info,
'L': self.selection_rate_limit}
self.keymap = dict(default_keymap, **self.keymap)
self.lock = threading.RLock()

def format_row(self, uuid, task, worker, timestamp, state):
mx = self.display_width

# include spacing
detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH

if uuid_space < UUID_WIDTH:
uuid_width = uuid_space
else:
uuid_width = UUID_WIDTH

detail_width = detail_width - uuid_width - 1
task_width = int(ceil(detail_width / 2.0))
worker_width = detail_width - task_width - 1

uuid = abbr(uuid, uuid_width).ljust(uuid_width)
worker = abbr(worker, worker_width).ljust(worker_width)
task = abbrtask(task, task_width).ljust(task_width)
state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
timestamp = timestamp.ljust(TIMESTAMP_WIDTH)

row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
timestamp, state)
if self.screen_width is None:
self.screen_width = len(row[:mx])
return row[:mx]

@property
def screen_width(self):
_, mx = self.win.getmaxyx()
return mx

@property
def screen_height(self):
my, _ = self.win.getmaxyx()
return my

@property
def display_width(self):
_, mx = self.win.getmaxyx()
return mx - BORDER_SPACING

@property
def display_height(self):
my, _ = self.win.getmaxyx()
return my - 10

@property
def limit(self):
return self.display_height

def find_position(self):
if not self.tasks:
return 0
for i, e in enumerate(self.tasks):
if self.selected_task == e[0]:
return i
return 0

def move_selection_up(self):
self.move_selection(-1)

def move_selection_down(self):
self.move_selection(1)

def move_selection(self, direction=1):
if not self.tasks:
return
pos = self.find_position()
try:
self.selected_task = self.tasks[pos + direction][0]
except IndexError:
self.selected_task = self.tasks[0][0]

keyalias = {curses.KEY_DOWN: 'J',
curses.KEY_UP: 'K',
curses.KEY_ENTER: 'I'}

def handle_keypress(self):
try:
key = self.win.getkey().upper()
except:
return
key = self.keyalias.get(key) or key
handler = self.keymap.get(key)
if handler is not None:
handler()

def alert(self, callback, title=None):
self.win.erase()
my, mx = self.win.getmaxyx()
y = blank_line = count(2)
if title:
self.win.addstr(next(y), 3, title,
curses.A_BOLD | curses.A_UNDERLINE)
next(blank_line)
callback(my, mx, next(y))
self.win.addstr(my - 1, 0, 'Press any key to continue...',
curses.A_BOLD)
self.win.refresh()
while 1:
try:
return self.win.getkey().upper()
except:
pass

def selection_rate_limit(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if not task.name:
return curses.beep()

my, mx = self.win.getmaxyx()
r = 'New rate limit: '
self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
rlimit = self.readline(my - 2, 3 + len(r))

if rlimit:
reply = self.app.control.rate_limit(task.name,
rlimit.strip(), reply=True)
self.alert_remote_control_reply(reply)

def alert_remote_control_reply(self, reply):

def callback(my, mx, xs):
y = count(xs)
if not reply:
self.win.addstr(
next(y), 3, 'No replies received in 1s deadline.',
curses.A_BOLD + curses.color_pair(2),
)
return

for subreply in reply:
curline = next(y)

host, response = next(items(subreply))
host = '{0}: '.format(host)
self.win.addstr(curline, 3, host, curses.A_BOLD)
attr = curses.A_NORMAL
text = ''
if 'error' in response:
text = response['error']
attr |= curses.color_pair(2)
elif 'ok' in response:
text = response['ok']
attr |= curses.color_pair(3)
self.win.addstr(curline, 3 + len(host), text, attr)

return self.alert(callback, 'Remote Control Command Replies')

def readline(self, x, y):
buffer = str()
curses.echo()
try:
i = 0
while 1:
ch = self.win.getch(x, y + i)
if ch != -1:
if ch in (10, curses.KEY_ENTER): # enter
break
if ch in (27, ):
buffer = str()
break
buffer += chr(ch)
i += 1
finally:
curses.noecho()
return buffer

def revoke_selection(self):
if not self.selected_task:
return curses.beep()
reply = self.app.control.revoke(self.selected_task, reply=True)
self.alert_remote_control_reply(reply)

def selection_info(self):
if not self.selected_task:
return

def alert_callback(mx, my, xs):
my, mx = self.win.getmaxyx()
y = count(xs)
task = self.state.tasks[self.selected_task]
info = task.info(extra=['state'])
infoitems = [
('args', info.pop('args', None)),
('kwargs', info.pop('kwargs', None))
] + list(info.items())
for key, value in infoitems:
if key is None:
continue
value = str(value)
curline = next(y)
keys = key + ': '
self.win.addstr(curline, 3, keys, curses.A_BOLD)
wrapped = wrap(value, mx - 2)
if len(wrapped) == 1:
self.win.addstr(
curline, len(keys) + 3,
abbr(wrapped[0],
self.screen_width - (len(keys) + 3)))
else:
for subline in wrapped:
nexty = next(y)
if nexty >= my - 1:
subline = ' ' * 4 + '[...]'
elif nexty >= my:
break
self.win.addstr(
nexty, 3,
abbr(' ' * 4 + subline, self.screen_width - 4),
curses.A_NORMAL,
)

return self.alert(
alert_callback, 'Task details for {0.selected_task}'.format(self),
)

def selection_traceback(self):
if not self.selected_task:
return curses.beep()
task = self.state.tasks[self.selected_task]
if task.state not in states.EXCEPTION_STATES:
return curses.beep()

def alert_callback(my, mx, xs):
y = count(xs)
for line in task.traceback.split('\n'):
self.win.addstr(next(y), 3, line)

return self.alert(
alert_callback,
'Task Exception Traceback for {0.selected_task}'.format(self),
)

def selection_result(self):
if not self.selected_task:
return

def alert_callback(my, mx, xs):
y = count(xs)
task = self.state.tasks[self.selected_task]
result = (getattr(task, 'result', None) or
getattr(task, 'exception', None))
for line in wrap(result or '', mx - 2):
self.win.addstr(next(y), 3, line)

return self.alert(
alert_callback,
'Task Result for {0.selected_task}'.format(self),
)

def display_task_row(self, lineno, task):
state_color = self.state_colors.get(task.state)
attr = curses.A_NORMAL
if task.uuid == self.selected_task:
attr = curses.A_STANDOUT
timestamp = datetime.utcfromtimestamp(
task.timestamp or time(),
)
timef = timestamp.strftime('%H:%M:%S')
hostname = task.worker.hostname if task.worker else '*NONE*'
line = self.format_row(task.uuid, task.name,
hostname,
timef, task.state)
self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)

if state_color:
self.win.addstr(lineno,
len(line) - STATE_WIDTH + BORDER_SPACING - 1,
task.state, state_color | attr)

def draw(self):
with self.lock:
win = self.win
self.handle_keypress()
x = LEFT_BORDER_OFFSET
y = blank_line = count(2)
my, mx = win.getmaxyx()
win.erase()
win.bkgd(' ', curses.color_pair(1))
win.border()
win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
next(blank_line)
win.addstr(next(y), x, self.format_row('UUID', 'TASK',
'WORKER', 'TIME', 'STATE'),
curses.A_BOLD | curses.A_UNDERLINE)
tasks = self.tasks
if tasks:
for row, (uuid, task) in enumerate(tasks):
if row > self.display_height:
break

if task.uuid:
lineno = next(y)
self.display_task_row(lineno, task)

# -- Footer
next(blank_line)
win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)

# Selected Task Info
if self.selected_task:
win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
info = 'Missing extended info'
detail = ''
try:
selection = self.state.tasks[self.selected_task]
except KeyError:
pass
else:
info = selection.info()
if 'runtime' in info:
info['runtime'] = '{0:.2f}'.format(info['runtime'])
if 'result' in info:
info['result'] = abbr(info['result'], 16)
info = ' '.join(
'{0}={1}'.format(key, value)
for key, value in items(info)
)
detail = '... -> key i'
infowin = abbr(info,
self.screen_width - len(self.selected_str) - 2,
detail)
win.addstr(my - 5, x + len(self.selected_str), infowin)
# Make ellipsis bold
if detail in infowin:
detailpos = len(infowin) - len(detail)
win.addstr(my - 5, x + len(self.selected_str) + detailpos,
detail, curses.A_BOLD)
else:
win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)

# Workers
if self.workers:
win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
win.addstr(my - 4, x + len(self.online_str),
', '.join(sorted(self.workers)), curses.A_NORMAL)
else:
win.addstr(my - 4, x, 'No workers discovered.')

# Info
win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
win.addstr(
my - 3, x + len(self.info_str),
STATUS_SCREEN.format(
s=self.state,
w_alive=len([w for w in values(self.state.workers)
if w.alive]),
w_all=len(self.state.workers),
),
curses.A_DIM,
)

# Help
self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
self.safe_add_str(my - 2, x + len(self.help_title), self.help,
curses.A_DIM)
win.refresh()

def safe_add_str(self, y, x, string, *args, **kwargs):
if x + len(string) > self.screen_width:
string = string[:self.screen_width - x]
self.win.addstr(y, x, string, *args, **kwargs)

def init_screen(self):
with self.lock:
self.win = curses.initscr()
self.win.nodelay(True)
self.win.keypad(True)
curses.start_color()
curses.init_pair(1, self.foreground, self.background)
# exception states
curses.init_pair(2, curses.COLOR_RED, self.background)
# successful state
curses.init_pair(3, curses.COLOR_GREEN, self.background)
# revoked state
curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
# greeting
curses.init_pair(5, curses.COLOR_BLUE, self.background)
# started state
curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)

self.state_colors = {states.SUCCESS: curses.color_pair(3),
states.REVOKED: curses.color_pair(4),
states.STARTED: curses.color_pair(6)}
for state in states.EXCEPTION_STATES:
self.state_colors[state] = curses.color_pair(2)

curses.cbreak()

def resetscreen(self):
with self.lock:
curses.nocbreak()
self.win.keypad(False)
curses.echo()
curses.endwin()

def nap(self):
curses.napms(self.screen_delay)

@property
def tasks(self):
return list(self.state.tasks_by_time(limit=self.limit))

@property
def workers(self):
return [hostname for hostname, w in items(self.state.workers)
if w.alive]


class DisplayThread(threading.Thread): # pragma: no cover

def __init__(self, display):
self.display = display
self.shutdown = False
threading.Thread.__init__(self)

def run(self):
while not self.shutdown:
self.display.draw()
self.display.nap()


def capture_events(app, state, display): # pragma: no cover

def on_connection_error(exc, interval):
print('Connection Error: {0!r}. Retry in {1}s.'.format(
exc, interval), file=sys.stderr)

while 1:
print('-> evtop: starting capture...', file=sys.stderr)
with app.connection() as conn:
try:
conn.ensure_connection(on_connection_error,
app.conf.BROKER_CONNECTION_MAX_RETRIES)
recv = app.events.Receiver(conn, handlers={'*': state.event})
display.resetscreen()
display.init_screen()
recv.capture()
except conn.connection_errors + conn.channel_errors as exc:
print('Connection lost: {0!r}'.format(exc), file=sys.stderr)


def evtop(app=None): # pragma: no cover
app = app_or_default(app)
state = app.events.State()
display = CursesMonitor(state, app)
display.init_screen()
refresher = DisplayThread(display)
refresher.start()
try:
capture_events(app, state, display)
except Exception:
refresher.shutdown = True
refresher.join()
display.resetscreen()
raise
except (KeyboardInterrupt, SystemExit):
refresher.shutdown = True
refresher.join()
display.resetscreen()


if __name__ == '__main__': # pragma: no cover
evtop()

+ 0
- 109
thesisenv/lib/python3.6/site-packages/celery/events/dumper.py

# -*- coding: utf-8 -*-
"""
celery.events.dumper
~~~~~~~~~~~~~~~~~~~~

This is a simple program that dumps events to the console
as they happen. Think of it like a `tcpdump` for Celery events.

"""
from __future__ import absolute_import, print_function

import sys

from datetime import datetime

from celery.app import app_or_default
from celery.utils.functional import LRUCache
from celery.utils.timeutils import humanize_seconds

__all__ = ['Dumper', 'evdump']

TASK_NAMES = LRUCache(limit=0xFFF)

HUMAN_TYPES = {'worker-offline': 'shutdown',
'worker-online': 'started',
'worker-heartbeat': 'heartbeat'}

CONNECTION_ERROR = """\
-> Cannot connect to %s: %s.
Trying again %s
"""


def humanize_type(type):
try:
return HUMAN_TYPES[type.lower()]
except KeyError:
return type.lower().replace('-', ' ')


class Dumper(object):

def __init__(self, out=sys.stdout):
self.out = out

def say(self, msg):
print(msg, file=self.out)
# need to flush so that output can be piped.
try:
self.out.flush()
except AttributeError:
pass

def on_event(self, ev):
timestamp = datetime.utcfromtimestamp(ev.pop('timestamp'))
type = ev.pop('type').lower()
hostname = ev.pop('hostname')
if type.startswith('task-'):
uuid = ev.pop('uuid')
if type in ('task-received', 'task-sent'):
task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \
.format(ev.pop('name'), uuid,
ev.pop('args'),
ev.pop('kwargs'))
else:
task = TASK_NAMES.get(uuid, '')
return self.format_task_event(hostname, timestamp,
type, task, ev)
fields = ', '.join(
'{0}={1}'.format(key, ev[key]) for key in sorted(ev)
)
sep = fields and ':' or ''
self.say('{0} [{1}] {2}{3} {4}'.format(
hostname, timestamp, humanize_type(type), sep, fields),
)

def format_task_event(self, hostname, timestamp, type, task, event):
fields = ', '.join(
'{0}={1}'.format(key, event[key]) for key in sorted(event)
)
sep = fields and ':' or ''
self.say('{0} [{1}] {2}{3} {4} {5}'.format(
hostname, timestamp, humanize_type(type), sep, task, fields),
)


def evdump(app=None, out=sys.stdout):
app = app_or_default(app)
dumper = Dumper(out=out)
dumper.say('-> evdump: starting capture...')
conn = app.connection().clone()

def _error_handler(exc, interval):
dumper.say(CONNECTION_ERROR % (
conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ')
))

while 1:
try:
conn.ensure_connection(_error_handler)
recv = app.events.Receiver(conn, handlers={'*': dumper.on_event})
recv.capture()
except (KeyboardInterrupt, SystemExit):
return conn and conn.close()
except conn.connection_errors + conn.channel_errors:
dumper.say('-> Connection lost, attempting reconnect')

if __name__ == '__main__': # pragma: no cover
evdump()
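
The dumper above can be driven programmatically as well as from the command line. A minimal sketch, assuming a locally running broker (the broker URL is illustrative; the rough CLI equivalent in this Celery line is `celery events --dump`):

    from celery import Celery
    from celery.events.dumper import evdump

    app = Celery(broker='amqp://guest@localhost//')  # illustrative broker URL
    evdump(app=app)  # prints one line per event until interrupted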

+ 0    - 114    thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py

# -*- coding: utf-8 -*-
"""
celery.events.snapshot
~~~~~~~~~~~~~~~~~~~~~~

Consuming the events as a stream is not always suitable,
so this module implements a system for taking snapshots of the
state of a cluster at regular intervals. There is a full
implementation of this that writes the snapshots to a database,
in :mod:`djcelery.snapshots` in the `django-celery` distribution.

"""
from __future__ import absolute_import

from kombu.utils.limits import TokenBucket

from celery import platforms
from celery.app import app_or_default
from celery.utils.timer2 import Timer
from celery.utils.dispatch import Signal
from celery.utils.imports import instantiate
from celery.utils.log import get_logger
from celery.utils.timeutils import rate

__all__ = ['Polaroid', 'evcam']

logger = get_logger('celery.evcam')


class Polaroid(object):
timer = None
shutter_signal = Signal(providing_args=('state', ))
cleanup_signal = Signal()
clear_after = False

_tref = None
_ctref = None

def __init__(self, state, freq=1.0, maxrate=None,
cleanup_freq=3600.0, timer=None, app=None):
self.app = app_or_default(app)
self.state = state
self.freq = freq
self.cleanup_freq = cleanup_freq
self.timer = timer or self.timer or Timer()
self.logger = logger
self.maxrate = maxrate and TokenBucket(rate(maxrate))

def install(self):
self._tref = self.timer.call_repeatedly(self.freq, self.capture)
self._ctref = self.timer.call_repeatedly(
self.cleanup_freq, self.cleanup,
)

def on_shutter(self, state):
pass

def on_cleanup(self):
pass

def cleanup(self):
logger.debug('Cleanup: Running...')
self.cleanup_signal.send(None)
self.on_cleanup()

def shutter(self):
if self.maxrate is None or self.maxrate.can_consume():
logger.debug('Shutter: %s', self.state)
self.shutter_signal.send(self.state)
self.on_shutter(self.state)

def capture(self):
self.state.freeze_while(self.shutter, clear_after=self.clear_after)

def cancel(self):
if self._tref:
self._tref() # flush all received events.
self._tref.cancel()
if self._ctref:
self._ctref.cancel()

def __enter__(self):
self.install()
return self

def __exit__(self, *exc_info):
self.cancel()


def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
logfile=None, pidfile=None, timer=None, app=None):
app = app_or_default(app)

if pidfile:
platforms.create_pidlock(pidfile)

app.log.setup_logging_subsystem(loglevel, logfile)

print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format(
camera, freq))
state = app.events.State()
cam = instantiate(camera, state, app=app, freq=freq,
maxrate=maxrate, timer=timer)
cam.install()
conn = app.connection()
recv = app.events.Receiver(conn, handlers={'*': state.event})
try:
try:
recv.capture(limit=None)
except KeyboardInterrupt:
raise SystemExit
finally:
cam.cancel()
conn.close()
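
The usual way to use this module is to subclass Polaroid, override on_shutter, and point evcam at the subclass. A minimal sketch; the module path and what is done with each task are illustrative:

    from pprint import pformat

    from celery.events.snapshot import Polaroid

    class DumpCam(Polaroid):
        clear_after = True  # clear the in-memory state after each shot

        def on_shutter(self, state):
            # newest tasks first, as (uuid, Task) pairs
            for uuid, task in state.tasks_by_time(limit=10):
                print('{0} {1}: {2}'.format(uuid, task.name, pformat(task.info())))

Such a camera could then be started from code with evcam('somemodule.DumpCam', freq=2.0), or via the events command's camera option as described in the Celery 3.1 docs.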

+ 0    - 656    thesisenv/lib/python3.6/site-packages/celery/events/state.py

# -*- coding: utf-8 -*-
"""
celery.events.state
~~~~~~~~~~~~~~~~~~~

This module implements a data structure used to keep
track of the state of a cluster of workers and the tasks
they are working on (by consuming events).

For every event consumed the state is updated,
so it represents the state of the cluster
as of the last event received.

Snapshots (:mod:`celery.events.snapshot`) can be used to
take "pictures" of this state at regular intervals,
e.g. to store it in a database.

"""
from __future__ import absolute_import

import bisect
import sys
import threading

from datetime import datetime
from decimal import Decimal
from itertools import islice
from operator import itemgetter
from time import time
from weakref import ref

from kombu.clocks import timetuple
from kombu.utils import cached_property, kwdict

from celery import states
from celery.five import class_property, items, values
from celery.utils import deprecated
from celery.utils.functional import LRUCache, memoize
from celery.utils.log import get_logger

PYPY = hasattr(sys, 'pypy_version_info')

# The window (as a percentage) is added to the worker's heartbeat
# frequency. If the time between updates exceeds this window,
# the worker is considered to be offline.
HEARTBEAT_EXPIRE_WINDOW = 200

# Max drift between event timestamp and time of event received
# before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16

DRIFT_WARNING = """\
Substantial drift from %s may mean clocks are out of sync. Current drift is
%s seconds. [orig: %s recv: %s]
"""

CAN_KWDICT = sys.version_info >= (2, 6, 5)

logger = get_logger(__name__)
warn = logger.warning

R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})'
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'

__all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']


@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
# we use memoize here so the warning is only logged once per hostname
warn(DRIFT_WARNING, hostname, drift,
datetime.fromtimestamp(local_received),
datetime.fromtimestamp(timestamp))


def heartbeat_expires(timestamp, freq=60,
expire_window=HEARTBEAT_EXPIRE_WINDOW,
Decimal=Decimal, float=float, isinstance=isinstance):
    # some JSON implementations return decimal.Decimal objects,
    # which are not compatible with float.
freq = float(freq) if isinstance(freq, Decimal) else freq
if isinstance(timestamp, Decimal):
timestamp = float(timestamp)
return timestamp + (freq * (expire_window / 1e2))
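# Worked example: with the defaults freq=60 and expire_window=200 the expiry
# is timestamp + 60 * (200 / 100) = timestamp + 120 seconds, i.e. a worker is
# considered offline once roughly two heartbeats in a row have been missed.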


def _depickle_task(cls, fields):
return cls(**(fields if CAN_KWDICT else kwdict(fields)))


def with_unique_field(attr):

def _decorate_cls(cls):

def __eq__(this, other):
if isinstance(other, this.__class__):
return getattr(this, attr) == getattr(other, attr)
return NotImplemented
cls.__eq__ = __eq__

def __ne__(this, other):
return not this.__eq__(other)
cls.__ne__ = __ne__

def __hash__(this):
return hash(getattr(this, attr))
cls.__hash__ = __hash__

return cls
return _decorate_cls
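# Illustration of the decorator above: two Worker instances sharing a hostname
# compare equal and hash identically, so they collapse to a single entry when
# used as dict keys or set members:
#
#     w1, w2 = Worker(hostname='celery@host1'), Worker(hostname='celery@host1')
#     assert w1 == w2 and hash(w1) == hash(w2)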


@with_unique_field('hostname')
class Worker(object):
"""Worker State."""
heartbeat_max = 4
expire_window = HEARTBEAT_EXPIRE_WINDOW

_fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
'active', 'processed', 'loadavg', 'sw_ident',
'sw_ver', 'sw_sys')
if not PYPY:
__slots__ = _fields + ('event', '__dict__', '__weakref__')

def __init__(self, hostname=None, pid=None, freq=60,
heartbeats=None, clock=0, active=None, processed=None,
loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
self.hostname = hostname
self.pid = pid
self.freq = freq
self.heartbeats = [] if heartbeats is None else heartbeats
self.clock = clock or 0
self.active = active
self.processed = processed
self.loadavg = loadavg
self.sw_ident = sw_ident
self.sw_ver = sw_ver
self.sw_sys = sw_sys
self.event = self._create_event_handler()

def __reduce__(self):
return self.__class__, (self.hostname, self.pid, self.freq,
self.heartbeats, self.clock, self.active,
self.processed, self.loadavg, self.sw_ident,
self.sw_ver, self.sw_sys)

def _create_event_handler(self):
_set = object.__setattr__
hbmax = self.heartbeat_max
heartbeats = self.heartbeats
hb_pop = self.heartbeats.pop
hb_append = self.heartbeats.append

def event(type_, timestamp=None,
local_received=None, fields=None,
max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int,
insort=bisect.insort, len=len):
fields = fields or {}
for k, v in items(fields):
_set(self, k, v)
if type_ == 'offline':
heartbeats[:] = []
else:
if not local_received or not timestamp:
return
drift = abs(int(local_received) - int(timestamp))
if drift > HEARTBEAT_DRIFT_MAX:
_warn_drift(self.hostname, drift,
local_received, timestamp)
if local_received:
hearts = len(heartbeats)
if hearts > hbmax - 1:
hb_pop(0)
if hearts and local_received > heartbeats[-1]:
hb_append(local_received)
else:
insort(heartbeats, local_received)
return event

def update(self, f, **kw):
for k, v in items(dict(f, **kw) if kw else f):
setattr(self, k, v)

def __repr__(self):
return R_WORKER.format(self)

@property
def status_string(self):
return 'ONLINE' if self.alive else 'OFFLINE'

@property
def heartbeat_expires(self):
return heartbeat_expires(self.heartbeats[-1],
self.freq, self.expire_window)

@property
def alive(self, nowfun=time):
return bool(self.heartbeats and nowfun() < self.heartbeat_expires)

@property
def id(self):
return '{0.hostname}.{0.pid}'.format(self)

@deprecated(3.2, 3.3)
def update_heartbeat(self, received, timestamp):
self.event(None, timestamp, received)

@deprecated(3.2, 3.3)
def on_online(self, timestamp=None, local_received=None, **fields):
self.event('online', timestamp, local_received, fields)

@deprecated(3.2, 3.3)
def on_offline(self, timestamp=None, local_received=None, **fields):
self.event('offline', timestamp, local_received, fields)

@deprecated(3.2, 3.3)
def on_heartbeat(self, timestamp=None, local_received=None, **fields):
self.event('heartbeat', timestamp, local_received, fields)

@class_property
def _defaults(cls):
"""Deprecated, to be removed in 3.3"""
source = cls()
return dict((k, getattr(source, k)) for k in cls._fields)


@with_unique_field('uuid')
class Task(object):
"""Task State."""
name = received = sent = started = succeeded = failed = retried = \
revoked = args = kwargs = eta = expires = retries = worker = result = \
exception = timestamp = runtime = traceback = exchange = \
routing_key = client = None
state = states.PENDING
clock = 0

_fields = ('uuid', 'name', 'state', 'received', 'sent', 'started',
'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
'eta', 'expires', 'retries', 'worker', 'result', 'exception',
'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
'clock', 'client')
if not PYPY:
__slots__ = ('__dict__', '__weakref__')

#: How to merge out of order events.
#: Disorder is detected by logical ordering (e.g. :event:`task-received`
#: must have happened before a :event:`task-failed` event).
#:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state. ``(RECEIVED, ('name', 'args'))`` means that the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after are simply ignored.
merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs',
'retries', 'eta', 'expires')}
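    # Worked example: if a task-failed event is processed before the matching
    # task-received event arrives, the task stays in the FAILURE state, but the
    # late RECEIVED event still contributes ``name``, ``args``, ``kwargs`` and
    # the other fields listed above, because they are part of its merge rule.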

    #: :meth:`info` displays these fields by default.
_info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
'expires', 'exception', 'exchange', 'routing_key')

def __init__(self, uuid=None, **kwargs):
self.uuid = uuid
if kwargs:
for k, v in items(kwargs):
setattr(self, k, v)

def event(self, type_, timestamp=None, local_received=None, fields=None,
precedence=states.precedence, items=items, dict=dict,
PENDING=states.PENDING, RECEIVED=states.RECEIVED,
STARTED=states.STARTED, FAILURE=states.FAILURE,
RETRY=states.RETRY, SUCCESS=states.SUCCESS,
REVOKED=states.REVOKED):
fields = fields or {}
if type_ == 'sent':
state, self.sent = PENDING, timestamp
elif type_ == 'received':
state, self.received = RECEIVED, timestamp
elif type_ == 'started':
state, self.started = STARTED, timestamp
elif type_ == 'failed':
state, self.failed = FAILURE, timestamp
elif type_ == 'retried':
state, self.retried = RETRY, timestamp
elif type_ == 'succeeded':
state, self.succeeded = SUCCESS, timestamp
elif type_ == 'revoked':
state, self.revoked = REVOKED, timestamp
else:
state = type_.upper()

# note that precedence here is reversed
# see implementation in celery.states.state.__lt__
if state != RETRY and self.state != RETRY and \
precedence(state) > precedence(self.state):
# this state logically happens-before the current state, so merge.
keep = self.merge_rules.get(state)
if keep is not None:
fields = dict(
(k, v) for k, v in items(fields) if k in keep
)
for key, value in items(fields):
setattr(self, key, value)
else:
self.state = state
self.timestamp = timestamp
for key, value in items(fields):
setattr(self, key, value)

def info(self, fields=None, extra=[]):
"""Information about this task suitable for on-screen display."""
fields = self._info_fields if fields is None else fields

def _keys():
for key in list(fields) + list(extra):
value = getattr(self, key, None)
if value is not None:
yield key, value

return dict(_keys())

def __repr__(self):
return R_TASK.format(self)

def as_dict(self):
get = object.__getattribute__
return dict(
(k, get(self, k)) for k in self._fields
)

def __reduce__(self):
return _depickle_task, (self.__class__, self.as_dict())

@property
def origin(self):
return self.client if self.worker is None else self.worker.id

@property
def ready(self):
return self.state in states.READY_STATES

@deprecated(3.2, 3.3)
def on_sent(self, timestamp=None, **fields):
self.event('sent', timestamp, fields)

@deprecated(3.2, 3.3)
def on_received(self, timestamp=None, **fields):
self.event('received', timestamp, fields)

@deprecated(3.2, 3.3)
def on_started(self, timestamp=None, **fields):
self.event('started', timestamp, fields)

@deprecated(3.2, 3.3)
def on_failed(self, timestamp=None, **fields):
self.event('failed', timestamp, fields)

@deprecated(3.2, 3.3)
def on_retried(self, timestamp=None, **fields):
self.event('retried', timestamp, fields)

@deprecated(3.2, 3.3)
def on_succeeded(self, timestamp=None, **fields):
self.event('succeeded', timestamp, fields)

@deprecated(3.2, 3.3)
def on_revoked(self, timestamp=None, **fields):
self.event('revoked', timestamp, fields)

@deprecated(3.2, 3.3)
def on_unknown_event(self, shortype, timestamp=None, **fields):
self.event(shortype, timestamp, fields)

@deprecated(3.2, 3.3)
def update(self, state, timestamp, fields,
_state=states.state, RETRY=states.RETRY):
return self.event(state, timestamp, None, fields)

@deprecated(3.2, 3.3)
def merge(self, state, timestamp, fields):
keep = self.merge_rules.get(state)
if keep is not None:
fields = dict((k, v) for k, v in items(fields) if k in keep)
for key, value in items(fields):
setattr(self, key, value)

@class_property
def _defaults(cls):
"""Deprecated, to be removed in 3.3."""
source = cls()
return dict((k, getattr(source, k)) for k in source._fields)


class State(object):
"""Records clusters state."""
Worker = Worker
Task = Task
event_count = 0
task_count = 0
heap_multiplier = 4

def __init__(self, callback=None,
workers=None, tasks=None, taskheap=None,
max_workers_in_memory=5000, max_tasks_in_memory=10000,
on_node_join=None, on_node_leave=None):
self.event_callback = callback
self.workers = (LRUCache(max_workers_in_memory)
if workers is None else workers)
self.tasks = (LRUCache(max_tasks_in_memory)
if tasks is None else tasks)
self._taskheap = [] if taskheap is None else taskheap
self.max_workers_in_memory = max_workers_in_memory
self.max_tasks_in_memory = max_tasks_in_memory
self.on_node_join = on_node_join
self.on_node_leave = on_node_leave
self._mutex = threading.Lock()
self.handlers = {}
self._seen_types = set()
self.rebuild_taskheap()

@cached_property
def _event(self):
return self._create_dispatcher()

def freeze_while(self, fun, *args, **kwargs):
clear_after = kwargs.pop('clear_after', False)
with self._mutex:
try:
return fun(*args, **kwargs)
finally:
if clear_after:
self._clear()

def clear_tasks(self, ready=True):
with self._mutex:
return self._clear_tasks(ready)

def _clear_tasks(self, ready=True):
if ready:
in_progress = dict(
(uuid, task) for uuid, task in self.itertasks()
if task.state not in states.READY_STATES)
self.tasks.clear()
self.tasks.update(in_progress)
else:
self.tasks.clear()
self._taskheap[:] = []

def _clear(self, ready=True):
self.workers.clear()
self._clear_tasks(ready)
self.event_count = 0
self.task_count = 0

def clear(self, ready=True):
with self._mutex:
return self._clear(ready)

def get_or_create_worker(self, hostname, **kwargs):
"""Get or create worker by hostname.

Return tuple of ``(worker, was_created)``.
"""
try:
worker = self.workers[hostname]
if kwargs:
worker.update(kwargs)
return worker, False
except KeyError:
worker = self.workers[hostname] = self.Worker(
hostname, **kwargs)
return worker, True

def get_or_create_task(self, uuid):
"""Get or create task by uuid."""
try:
return self.tasks[uuid], False
except KeyError:
task = self.tasks[uuid] = self.Task(uuid)
return task, True

def event(self, event):
with self._mutex:
return self._event(event)

def task_event(self, type_, fields):
"""Deprecated, use :meth:`event`."""
return self._event(dict(fields, type='-'.join(['task', type_])))[0]

def worker_event(self, type_, fields):
"""Deprecated, use :meth:`event`."""
return self._event(dict(fields, type='-'.join(['worker', type_])))[0]

def _create_dispatcher(self):
get_handler = self.handlers.__getitem__
event_callback = self.event_callback
wfields = itemgetter('hostname', 'timestamp', 'local_received')
tfields = itemgetter('uuid', 'hostname', 'timestamp',
'local_received', 'clock')
taskheap = self._taskheap
th_append = taskheap.append
th_pop = taskheap.pop
        # Removing events from the task heap is an O(n) operation,
        # so it is easier to just account for the common number of events
        # per task (PENDING -> RECEIVED -> STARTED -> final).
max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
add_type = self._seen_types.add
on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
tasks, Task = self.tasks, self.Task
workers, Worker = self.workers, self.Worker
# avoid updating LRU entry at getitem
get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

def _event(event,
timetuple=timetuple, KeyError=KeyError,
insort=bisect.insort, created=True):
self.event_count += 1
if event_callback:
event_callback(self, event)
group, _, subject = event['type'].partition('-')
try:
handler = get_handler(group)
except KeyError:
pass
else:
return handler(subject, event), subject

if group == 'worker':
try:
hostname, timestamp, local_received = wfields(event)
except KeyError:
pass
else:
is_offline = subject == 'offline'
try:
worker, created = get_worker(hostname), False
except KeyError:
if is_offline:
worker, created = Worker(hostname), False
else:
worker = workers[hostname] = Worker(hostname)
worker.event(subject, timestamp, local_received, event)
if on_node_join and (created or subject == 'online'):
on_node_join(worker)
if on_node_leave and is_offline:
on_node_leave(worker)
workers.pop(hostname, None)
return (worker, created), subject
elif group == 'task':
(uuid, hostname, timestamp,
local_received, clock) = tfields(event)
# task-sent event is sent by client, not worker
is_client_event = subject == 'sent'
try:
task, created = get_task(uuid), False
except KeyError:
task = tasks[uuid] = Task(uuid)
if is_client_event:
task.client = hostname
else:
try:
worker, created = get_worker(hostname), False
except KeyError:
worker = workers[hostname] = Worker(hostname)
task.worker = worker
if worker is not None and local_received:
worker.event(None, local_received, timestamp)

origin = hostname if is_client_event else worker.id

# remove oldest event if exceeding the limit.
heaps = len(taskheap)
if heaps + 1 > max_events_in_heap:
th_pop(0)

# most events will be dated later than the previous.
timetup = timetuple(clock, timestamp, origin, ref(task))
if heaps and timetup > taskheap[-1]:
th_append(timetup)
else:
insort(taskheap, timetup)

if subject == 'received':
self.task_count += 1
task.event(subject, timestamp, local_received, event)
task_name = task.name
if task_name is not None:
add_type(task_name)
return (task, created), subject
return _event

def rebuild_taskheap(self, timetuple=timetuple):
heap = self._taskheap[:] = [
timetuple(t.clock, t.timestamp, t.origin, ref(t))
for t in values(self.tasks)
]
heap.sort()

def itertasks(self, limit=None):
for index, row in enumerate(items(self.tasks)):
yield row
if limit and index + 1 >= limit:
break

def tasks_by_time(self, limit=None):
"""Generator giving tasks ordered by time,
in ``(uuid, Task)`` tuples."""
seen = set()
for evtup in islice(reversed(self._taskheap), 0, limit):
task = evtup[3]()
if task is not None:
uuid = task.uuid
if uuid not in seen:
yield uuid, task
seen.add(uuid)
tasks_by_timestamp = tasks_by_time

def tasks_by_type(self, name, limit=None):
"""Get all tasks by type.

        Return an iterator over ``(uuid, Task)`` tuples.

"""
return islice(
((uuid, task) for uuid, task in self.tasks_by_time()
if task.name == name),
0, limit,
)

def tasks_by_worker(self, hostname, limit=None):
"""Get all tasks by worker.

"""
return islice(
((uuid, task) for uuid, task in self.tasks_by_time()
if task.worker.hostname == hostname),
0, limit,
)

def task_types(self):
"""Return a list of all seen task types."""
return sorted(self._seen_types)

def alive_workers(self):
"""Return a list of (seemingly) alive workers."""
return [w for w in values(self.workers) if w.alive]

def __repr__(self):
return R_STATE.format(self)

def __reduce__(self):
return self.__class__, (
self.event_callback, self.workers, self.tasks, None,
self.max_workers_in_memory, self.max_tasks_in_memory,
self.on_node_join, self.on_node_leave,
)
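
The State class above is normally fed from an event receiver. A minimal sketch of real-time monitoring built on top of it; the broker URL and what the handler prints are illustrative:

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')  # illustrative broker URL
    state = app.events.State()

    def on_event(event):
        state.event(event)  # update the in-memory cluster state
        print('workers alive: {0}, tasks seen: {1}'.format(
            len(state.alive_workers()), state.task_count))

    with app.connection() as connection:
        recv = app.events.Receiver(connection, handlers={'*': on_event})
        recv.capture(limit=None, timeout=None)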

+ 0    - 171    thesisenv/lib/python3.6/site-packages/celery/exceptions.py

# -*- coding: utf-8 -*-
"""
celery.exceptions
~~~~~~~~~~~~~~~~~

This module contains all exceptions used by the Celery API.

"""
from __future__ import absolute_import

import numbers

from .five import string_t

from billiard.exceptions import ( # noqa
SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated,
)

__all__ = ['SecurityError', 'Ignore', 'QueueNotFound',
'WorkerShutdown', 'WorkerTerminate',
'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered',
'TimeoutError', 'MaxRetriesExceededError', 'Retry',
'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored',
'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning',
'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning',
'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError',
'Terminated']

UNREGISTERED_FMT = """\
Task of kind {0} is not registered, please make sure it's imported.\
"""


class SecurityError(Exception):
"""Security related exceptions.

Handle with care.

"""


class Ignore(Exception):
"""A task can raise this to ignore doing state updates."""


class Reject(Exception):
"""A task can raise this if it wants to reject/requeue the message."""

def __init__(self, reason=None, requeue=False):
self.reason = reason
self.requeue = requeue
super(Reject, self).__init__(reason, requeue)

def __repr__(self):
return 'reject requeue=%s: %s' % (self.requeue, self.reason)


class WorkerTerminate(SystemExit):
"""Signals that the worker should terminate immediately."""
SystemTerminate = WorkerTerminate # XXX compat


class WorkerShutdown(SystemExit):
"""Signals that the worker should perform a warm shutdown."""


class QueueNotFound(KeyError):
"""Task routed to a queue not in CELERY_QUEUES."""


class ImproperlyConfigured(ImportError):
"""Celery is somehow improperly configured."""


class NotRegistered(KeyError):
"""The task is not registered."""

def __repr__(self):
return UNREGISTERED_FMT.format(self)


class AlreadyRegistered(Exception):
"""The task is already registered."""


class TimeoutError(Exception):
"""The operation timed out."""


class MaxRetriesExceededError(Exception):
"""The tasks max restart limit has been exceeded."""


class Retry(Exception):
"""The task is to be retried later."""

#: Optional message describing context of retry.
message = None

#: Exception (if any) that caused the retry to happen.
exc = None

#: Time of retry (ETA), either :class:`numbers.Real` or
#: :class:`~datetime.datetime`.
when = None

def __init__(self, message=None, exc=None, when=None, **kwargs):
from kombu.utils.encoding import safe_repr
self.message = message
if isinstance(exc, string_t):
self.exc, self.excs = None, exc
else:
self.exc, self.excs = exc, safe_repr(exc) if exc else None
self.when = when
Exception.__init__(self, exc, when, **kwargs)

def humanize(self):
if isinstance(self.when, numbers.Real):
return 'in {0.when}s'.format(self)
return 'at {0.when}'.format(self)

def __str__(self):
if self.message:
return self.message
if self.excs:
return 'Retry {0}: {1}'.format(self.humanize(), self.excs)
return 'Retry {0}'.format(self.humanize())

def __reduce__(self):
return self.__class__, (self.message, self.excs, self.when)
RetryTaskError = Retry # XXX compat


class TaskRevokedError(Exception):
"""The task has been revoked, so no result available."""


class NotConfigured(UserWarning):
"""Celery has not been configured, as no config module has been found."""


class AlwaysEagerIgnored(UserWarning):
"""send_task ignores CELERY_ALWAYS_EAGER option"""


class InvalidTaskError(Exception):
"""The task has invalid data or is not properly constructed."""


class IncompleteStream(Exception):
"""Found the end of a stream of data, but the data is not yet complete."""


class ChordError(Exception):
"""A task part of the chord raised an exception."""


class CPendingDeprecationWarning(PendingDeprecationWarning):
pass


class CDeprecationWarning(DeprecationWarning):
pass


class FixupWarning(UserWarning):
pass


class DuplicateNodenameWarning(UserWarning):
"""Multiple workers are using the same nodename."""

+ 0    - 392    thesisenv/lib/python3.6/site-packages/celery/five.py

# -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~

Compatibility implementations of features
only available in newer Python versions.


"""
from __future__ import absolute_import

import io
import operator
import sys

from importlib import import_module
from types import ModuleType

from kombu.five import monotonic

try:
from collections import Counter
except ImportError: # pragma: no cover
from collections import defaultdict

def Counter(): # noqa
return defaultdict(int)

__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty',
'zip_longest', 'map', 'string', 'string_t',
'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values',
'nextfun', 'reraise', 'WhateverIO', 'with_metaclass',
'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d',
'class_property', 'reclassmethod', 'create_module',
'recreate_module', 'monotonic']

# ############# py3k #########################################################
PY3 = sys.version_info[0] == 3

try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa

try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa

try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa


if PY3: # pragma: no cover
import builtins

from queue import Queue, Empty
from itertools import zip_longest

map = map
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int, )
_byte_t = bytes

open_fqdn = 'builtins.open'

def items(d):
return d.items()

def keys(d):
return d.keys()

def values(d):
return d.values()

def nextfun(it):
return it.__next__

exec_ = getattr(builtins, 'exec')

def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value

else:
import __builtin__ as builtins # noqa
from Queue import Queue, Empty # noqa
from itertools import imap as map, izip_longest as zip_longest # noqa
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode # noqa
long_t = long # noqa
range = xrange # noqa
int_types = (int, long) # noqa
_byte_t = (str, bytes) # noqa

open_fqdn = '__builtin__.open'

def items(d): # noqa
return d.iteritems()

def keys(d): # noqa
return d.iterkeys()

def values(d): # noqa
return d.itervalues()

def nextfun(it): # noqa
return it.next

def exec_(code, globs=None, locs=None): # pragma: no cover
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")

exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")


def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""Class decorator to set metaclass.

Works with both Python 2 and Python 3 and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).

"""

def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)

return _clone_with_metaclass
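# Example of the decorator above: the class body is rebuilt with the given
# metaclass on both Python 2 and Python 3, without adding an extra base class:
#
#     from abc import ABCMeta, abstractmethod
#
#     @with_metaclass(ABCMeta)
#     class Backend(object):
#
#         @abstractmethod
#         def store(self, key, value):
#             raise NotImplementedError()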


# ############# collections.OrderedDict ######################################
# was moved to kombu
from kombu.utils.compat import OrderedDict # noqa

# ############# threading.TIMEOUT_MAX ########################################
try:
from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
except ImportError:
THREAD_TIMEOUT_MAX = 1e10 # noqa

# ############# format(int, ',d') ############################################

if sys.version_info >= (2, 7): # pragma: no cover
def format_d(i):
return format(i, ',d')
else: # pragma: no cover
def format_d(i): # noqa
s = '%d' % i
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))


# ############# Module Generation ############################################

# Utilities to dynamically
# recreate modules, either for lazy loading or
# to create old modules at runtime instead of
# having them litter the source tree.

# functools.reduce is unavailable in Python 2.5; fall back to the builtin reduce.
try:
from functools import reduce
except ImportError:
pass

MODULE_DEPRECATED = """
The module %s is deprecated and will be removed in a future version.
"""

DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])

# im_func is no longer available in Py3.
# instead the unbound method itself can be used.
if sys.version_info[0] == 3: # pragma: no cover
def fun_of_method(method):
return method
else:
def fun_of_method(method): # noqa
return method.im_func


def getappattr(path):
"""Gets attribute from the current_app recursively,
e.g. getappattr('amqp.get_task_consumer')``."""
from celery import current_app
return current_app._rgetattr(path)


def _compat_task_decorator(*args, **kwargs):
from celery import current_app
kwargs.setdefault('accept_magic_kwargs', True)
return current_app.task(*args, **kwargs)


def _compat_periodic_task_decorator(*args, **kwargs):
from celery.task import periodic_task
kwargs.setdefault('accept_magic_kwargs', True)
return periodic_task(*args, **kwargs)


COMPAT_MODULES = {
'celery': {
'execute': {
'send_task': 'send_task',
},
'decorators': {
'task': _compat_task_decorator,
'periodic_task': _compat_periodic_task_decorator,
},
'log': {
'get_default_logger': 'log.get_default_logger',
'setup_logger': 'log.setup_logger',
'setup_logging_subsystem': 'log.setup_logging_subsystem',
'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
},
'messaging': {
'TaskPublisher': 'amqp.TaskPublisher',
'TaskConsumer': 'amqp.TaskConsumer',
'establish_connection': 'connection',
'get_consumer_set': 'amqp.TaskConsumer',
},
'registry': {
'tasks': 'tasks',
},
},
'celery.task': {
'control': {
'broadcast': 'control.broadcast',
'rate_limit': 'control.rate_limit',
'time_limit': 'control.time_limit',
'ping': 'control.ping',
'revoke': 'control.revoke',
'discard_all': 'control.purge',
'inspect': 'control.inspect',
},
'schedules': 'celery.schedules',
'chords': 'celery.canvas',
}
}


class class_property(object):

def __init__(self, getter=None, setter=None):
if getter is not None and not isinstance(getter, classmethod):
getter = classmethod(getter)
if setter is not None and not isinstance(setter, classmethod):
setter = classmethod(setter)
self.__get = getter
self.__set = setter

info = getter.__get__(object) # just need the info attrs.
self.__doc__ = info.__doc__
self.__name__ = info.__name__
self.__module__ = info.__module__

def __get__(self, obj, type=None):
if obj and type is None:
type = obj.__class__
return self.__get.__get__(obj, type)()

def __set__(self, obj, value):
if obj is None:
return self
return self.__set.__get__(obj)(value)

def setter(self, setter):
return self.__class__(self.__get, setter)
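# Example of class_property: the decorated classmethod becomes readable as an
# attribute on both the class and its instances:
#
#     class Config(object):
#
#         @class_property
#         def defaults(cls):
#             return {'timeout': 10}
#
#     assert Config.defaults == Config().defaults == {'timeout': 10}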


def reclassmethod(method):
return classmethod(fun_of_method(method))


class LazyModule(ModuleType):
_compat_modules = ()
_all_by_module = {}
_direct = {}
_object_origins = {}

def __getattr__(self, name):
if name in self._object_origins:
module = __import__(self._object_origins[name], None, None, [name])
for item in self._all_by_module[module.__name__]:
setattr(self, item, getattr(module, item))
return getattr(module, name)
elif name in self._direct: # pragma: no cover
module = __import__(self._direct[name], None, None, [name])
setattr(self, name, module)
return module
return ModuleType.__getattribute__(self, name)

def __dir__(self):
return list(set(self.__all__) | DEFAULT_ATTRS)

def __reduce__(self):
return import_module, (self.__name__, )


def create_module(name, attrs, cls_attrs=None, pkg=None,
base=LazyModule, prepare_attr=None):
fqdn = '.'.join([pkg.__name__, name]) if pkg else name
cls_attrs = {} if cls_attrs is None else cls_attrs
pkg, _, modname = name.rpartition('.')
cls_attrs['__module__'] = pkg

attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr)
for attr_name, attr in items(attrs))
module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn)
module.__dict__.update(attrs)
return module


def recreate_module(name, compat_modules=(), by_module={}, direct={},
base=LazyModule, **attrs):
old_module = sys.modules[name]
origins = get_origins(by_module)
compat_modules = COMPAT_MODULES.get(name, ())

cattrs = dict(
_compat_modules=compat_modules,
_all_by_module=by_module, _direct=direct,
_object_origins=origins,
__all__=tuple(set(reduce(
operator.add,
[tuple(v) for v in [compat_modules, origins, direct, attrs]],
))),
)
new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
for mod in compat_modules))
return old_module, new_module


def get_compat_module(pkg, name):
from .local import Proxy

def prepare(attr):
if isinstance(attr, string_t):
return Proxy(getappattr, (attr, ))
return attr

attrs = COMPAT_MODULES[pkg.__name__][name]
if isinstance(attrs, string_t):
fqdn = '.'.join([pkg.__name__, name])
module = sys.modules[fqdn] = import_module(attrs)
return module
attrs['__all__'] = list(attrs)
return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)


def get_origins(defs):
origins = {}
for module, attrs in items(defs):
origins.update(dict((attr, module) for attr in attrs))
return origins


_SIO_write = io.StringIO.write
_SIO_init = io.StringIO.__init__


class WhateverIO(io.StringIO):

def __init__(self, v=None, *a, **kw):
_SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw)

def write(self, data):
_SIO_write(self, data.decode() if isinstance(data, _byte_t) else data)

+ 0    - 0    thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py


+ 0    - 266    thesisenv/lib/python3.6/site-packages/celery/fixups/django.py

from __future__ import absolute_import

import os
import sys
import warnings

from kombu.utils import cached_property, symbol_by_name

from datetime import datetime
from importlib import import_module

from celery import signals
from celery.exceptions import FixupWarning

if sys.version_info[0] < 3 and not hasattr(sys, 'pypy_version_info'):
from StringIO import StringIO
else:
from io import StringIO

__all__ = ['DjangoFixup', 'fixup']

ERR_NOT_INSTALLED = """\
Environment variable DJANGO_SETTINGS_MODULE is defined
but Django is not installed. Will not apply Django fixups!
"""


def _maybe_close_fd(fh):
try:
os.close(fh.fileno())
except (AttributeError, OSError, TypeError):
# TypeError added for celery#962
pass


def fixup(app, env='DJANGO_SETTINGS_MODULE'):
SETTINGS_MODULE = os.environ.get(env)
if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
try:
import django # noqa
except ImportError:
warnings.warn(FixupWarning(ERR_NOT_INSTALLED))
else:
return DjangoFixup(app).install()


class DjangoFixup(object):

def __init__(self, app):
self.app = app
self.app.set_default()
self._worker_fixup = None

def install(self):
# Need to add project directory to path
sys.path.append(os.getcwd())

self.app.loader.now = self.now
self.app.loader.mail_admins = self.mail_admins

signals.import_modules.connect(self.on_import_modules)
signals.worker_init.connect(self.on_worker_init)
return self

@cached_property
def worker_fixup(self):
if self._worker_fixup is None:
self._worker_fixup = DjangoWorkerFixup(self.app)
return self._worker_fixup

def on_import_modules(self, **kwargs):
# call django.setup() before task modules are imported
self.worker_fixup.validate_models()

def on_worker_init(self, **kwargs):
self.worker_fixup.install()

def now(self, utc=False):
return datetime.utcnow() if utc else self._now()

def mail_admins(self, subject, body, fail_silently=False, **kwargs):
return self._mail_admins(subject, body, fail_silently=fail_silently)

@cached_property
def _mail_admins(self):
return symbol_by_name('django.core.mail:mail_admins')

@cached_property
def _now(self):
try:
return symbol_by_name('django.utils.timezone:now')
except (AttributeError, ImportError): # pre django-1.4
return datetime.now


class DjangoWorkerFixup(object):
_db_recycles = 0

def __init__(self, app):
self.app = app
self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None)
self._db = import_module('django.db')
self._cache = import_module('django.core.cache')
self._settings = symbol_by_name('django.conf:settings')

# Database-related exceptions.
DatabaseError = symbol_by_name('django.db:DatabaseError')
try:
import MySQLdb as mysql
_my_database_errors = (mysql.DatabaseError,
mysql.InterfaceError,
mysql.OperationalError)
except ImportError:
_my_database_errors = () # noqa
try:
import psycopg2 as pg
_pg_database_errors = (pg.DatabaseError,
pg.InterfaceError,
pg.OperationalError)
except ImportError:
_pg_database_errors = () # noqa
try:
import sqlite3
_lite_database_errors = (sqlite3.DatabaseError,
sqlite3.InterfaceError,
sqlite3.OperationalError)
except ImportError:
_lite_database_errors = () # noqa
try:
import cx_Oracle as oracle
_oracle_database_errors = (oracle.DatabaseError,
oracle.InterfaceError,
oracle.OperationalError)
except ImportError:
_oracle_database_errors = () # noqa

try:
self._close_old_connections = symbol_by_name(
'django.db:close_old_connections',
)
except (ImportError, AttributeError):
self._close_old_connections = None
self.database_errors = (
(DatabaseError, ) +
_my_database_errors +
_pg_database_errors +
_lite_database_errors +
_oracle_database_errors
)

def validate_models(self):
import django
try:
django_setup = django.setup
except AttributeError:
pass
else:
django_setup()
s = StringIO()
try:
from django.core.management.validation import get_validation_errors
except ImportError:
from django.core.management.base import BaseCommand
cmd = BaseCommand()
try:
# since django 1.5
from django.core.management.base import OutputWrapper
cmd.stdout = OutputWrapper(sys.stdout)
cmd.stderr = OutputWrapper(sys.stderr)
except ImportError:
cmd.stdout, cmd.stderr = sys.stdout, sys.stderr

cmd.check()
else:
num_errors = get_validation_errors(s, None)
if num_errors:
raise RuntimeError(
'One or more Django models did not validate:\n{0}'.format(
s.getvalue()))

def install(self):
signals.beat_embedded_init.connect(self.close_database)
signals.worker_ready.connect(self.on_worker_ready)
signals.task_prerun.connect(self.on_task_prerun)
signals.task_postrun.connect(self.on_task_postrun)
signals.worker_process_init.connect(self.on_worker_process_init)
self.close_database()
self.close_cache()
return self

def on_worker_process_init(self, **kwargs):
# Child process must validate models again if on Windows,
# or if they were started using execv.
if os.environ.get('FORKED_BY_MULTIPROCESSING'):
self.validate_models()

# close connections:
# the parent process may have established these,
# so need to close them.

# calling db.close() on some DB connections will cause
# the inherited DB conn to also get broken in the parent
# process so we need to remove it without triggering any
# network IO that close() might cause.
try:
for c in self._db.connections.all():
if c and c.connection:
_maybe_close_fd(c.connection)
except AttributeError:
if self._db.connection and self._db.connection.connection:
_maybe_close_fd(self._db.connection.connection)

# use the _ version to avoid DB_REUSE preventing the conn.close() call
self._close_database()
self.close_cache()

def on_task_prerun(self, sender, **kwargs):
"""Called before every task."""
if not getattr(sender.request, 'is_eager', False):
self.close_database()

def on_task_postrun(self, sender, **kwargs):
# See http://groups.google.com/group/django-users/
# browse_thread/thread/78200863d0c07c6d/
if not getattr(sender.request, 'is_eager', False):
self.close_database()
self.close_cache()

def close_database(self, **kwargs):
if self._close_old_connections:
return self._close_old_connections() # Django 1.6
if not self.db_reuse_max:
return self._close_database()
if self._db_recycles >= self.db_reuse_max * 2:
self._db_recycles = 0
self._close_database()
self._db_recycles += 1

def _close_database(self):
try:
funs = [conn.close for conn in self._db.connections.all()]
except AttributeError:
if hasattr(self._db, 'close_old_connections'): # django 1.6
funs = [self._db.close_old_connections]
else:
                # pre multidb, pending deprecation in Django 1.6
funs = [self._db.close_connection]

for close in funs:
try:
close()
except self.database_errors as exc:
str_exc = str(exc)
if 'closed' not in str_exc and 'not connected' not in str_exc:
raise

def close_cache(self):
try:
self._cache.cache.close()
except (TypeError, AttributeError):
pass

def on_worker_ready(self, **kwargs):
if self._settings.DEBUG:
warnings.warn('Using settings.DEBUG leads to a memory leak, never '
'use this setting in production environments!')
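
This fixup is what makes a djcelery-free setup work: once DJANGO_SETTINGS_MODULE is set, a plain Celery app configures itself from Django. A minimal sketch of the matching project-side module for this Celery line; the project name `proj` and module layout are illustrative:

    # proj/celery.py
    from __future__ import absolute_import

    import os

    from celery import Celery

    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')

    from django.conf import settings  # noqa

    app = Celery('proj')
    app.config_from_object('django.conf:settings')
    app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)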

+ 0    - 37    thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py

# -*- coding: utf-8 -*-
"""
celery.loaders
~~~~~~~~~~~~~~

Loaders define how configuration is read, what happens
when workers start, when tasks are executed and so on.

"""
from __future__ import absolute_import

from celery._state import current_app
from celery.utils import deprecated
from celery.utils.imports import symbol_by_name, import_from_cwd

__all__ = ['get_loader_cls']

LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader',
'default': 'celery.loaders.default:Loader',
'django': 'djcelery.loaders:DjangoLoader'}


def get_loader_cls(loader):
"""Get loader class by name/alias"""
return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd)


@deprecated(deprecation=2.5, removal=4.0,
alternative='celery.current_app.loader')
def current_loader():
return current_app.loader


@deprecated(deprecation=2.5, removal=4.0,
alternative='celery.current_app.conf')
def load_settings():
return current_app.conf

+ 0    - 17    thesisenv/lib/python3.6/site-packages/celery/loaders/app.py

# -*- coding: utf-8 -*-
"""
celery.loaders.app
~~~~~~~~~~~~~~~~~~

The default loader used with custom app instances.

"""
from __future__ import absolute_import

from .base import BaseLoader

__all__ = ['AppLoader']


class AppLoader(BaseLoader):
pass

+ 0    - 299    thesisenv/lib/python3.6/site-packages/celery/loaders/base.py

# -*- coding: utf-8 -*-
"""
celery.loaders.base
~~~~~~~~~~~~~~~~~~~

Loader base class.

"""
from __future__ import absolute_import

import anyjson
import imp as _imp
import importlib
import os
import re
import sys

from datetime import datetime

from kombu.utils import cached_property
from kombu.utils.encoding import safe_str

from celery import signals
from celery.datastructures import DictAttribute, force_mapping
from celery.five import reraise, string_t
from celery.utils.functional import maybe_list
from celery.utils.imports import (
import_from_cwd, symbol_by_name, NotAPackage, find_module,
)

__all__ = ['BaseLoader']

_RACE_PROTECTION = False
CONFIG_INVALID_NAME = """\
Error: Module '{module}' doesn't exist, or it's not a valid \
Python module name.
"""

CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\
Did you mean '{suggest}'?
"""


class BaseLoader(object):
"""The base class for loaders.

    A loader handles:

* Reading celery client/worker configurations.

* What happens when a task starts?
See :meth:`on_task_init`.

* What happens when the worker starts?
See :meth:`on_worker_init`.

* What happens when the worker shuts down?
See :meth:`on_worker_shutdown`.

* What modules are imported to find tasks?

"""
builtin_modules = frozenset()
configured = False
override_backends = {}
worker_initialized = False

_conf = None

def __init__(self, app, **kwargs):
self.app = app
self.task_modules = set()

def now(self, utc=True):
if utc:
return datetime.utcnow()
return datetime.now()

def on_task_init(self, task_id, task):
"""This method is called before a task is executed."""
pass

def on_process_cleanup(self):
"""This method is called after a task is executed."""
pass

def on_worker_init(self):
"""This method is called when the worker (:program:`celery worker`)
starts."""
pass

def on_worker_shutdown(self):
"""This method is called when the worker (:program:`celery worker`)
shuts down."""
pass

def on_worker_process_init(self):
"""This method is called when a child process starts."""
pass

def import_task_module(self, module):
self.task_modules.add(module)
return self.import_from_cwd(module)

def import_module(self, module, package=None):
return importlib.import_module(module, package=package)

def import_from_cwd(self, module, imp=None, package=None):
return import_from_cwd(
module,
self.import_module if imp is None else imp,
package=package,
)

def import_default_modules(self):
signals.import_modules.send(sender=self.app)
return [
self.import_task_module(m) for m in (
tuple(self.builtin_modules) +
tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) +
tuple(maybe_list(self.app.conf.CELERY_INCLUDE))
)
]

def init_worker(self):
if not self.worker_initialized:
self.worker_initialized = True
self.import_default_modules()
self.on_worker_init()

def shutdown_worker(self):
self.on_worker_shutdown()

def init_worker_process(self):
self.on_worker_process_init()

def config_from_object(self, obj, silent=False):
if isinstance(obj, string_t):
try:
obj = self._smart_import(obj, imp=self.import_from_cwd)
except (ImportError, AttributeError):
if silent:
return False
raise
self._conf = force_mapping(obj)
return True

def _smart_import(self, path, imp=None):
imp = self.import_module if imp is None else imp
if ':' in path:
# Path includes attribute so can just jump here.
# e.g. ``os.path:abspath``.
return symbol_by_name(path, imp=imp)

        # Not sure if path is just a module name or if it includes an
        # attribute name (e.g. ``os.path`` vs. ``os.path.abspath``).
try:
return imp(path)
except ImportError:
# Not a module name, so try module + attribute.
return symbol_by_name(path, imp=imp)

def _import_config_module(self, name):
try:
self.find_module(name)
except NotAPackage:
if name.endswith('.py'):
reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format(
module=name, suggest=name[:-3])), sys.exc_info()[2])
reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format(
module=name)), sys.exc_info()[2])
else:
return self.import_from_cwd(name)

def find_module(self, module):
return find_module(module)

def cmdline_config_parser(
self, args, namespace='celery',
re_type=re.compile(r'\((\w+)\)'),
extra_types={'json': anyjson.loads},
override_types={'tuple': 'json',
'list': 'json',
'dict': 'json'}):
from celery.app.defaults import Option, NAMESPACES
namespace = namespace.upper()
typemap = dict(Option.typemap, **extra_types)

def getarg(arg):
"""Parse a single configuration definition from
the command-line."""

# ## find key/value
# ns.key=value|ns_key=value (case insensitive)
key, value = arg.split('=', 1)
key = key.upper().replace('.', '_')

# ## find namespace.
# .key=value|_key=value expands to default namespace.
if key[0] == '_':
ns, key = namespace, key[1:]
else:
# find namespace part of key
ns, key = key.split('_', 1)

ns_key = (ns and ns + '_' or '') + key

# (type)value makes cast to custom type.
cast = re_type.match(value)
if cast:
type_ = cast.groups()[0]
type_ = override_types.get(type_, type_)
value = value[len(cast.group()):]
value = typemap[type_](value)
else:
try:
value = NAMESPACES[ns][key].to_python(value)
except ValueError as exc:
# display key name in error message.
raise ValueError('{0!r}: {1}'.format(ns_key, exc))
return ns_key, value
return dict(getarg(arg) for arg in args)
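        # Worked example: with the default namespace 'celery', the argument
        # list ['.result_backend=redis://', 'celeryd.concurrency=(int)10']
        # parses to roughly
        #     {'CELERY_RESULT_BACKEND': 'redis://', 'CELERYD_CONCURRENCY': 10}
        # since a leading dot expands to the default namespace and a '(type)'
        # prefix casts the value through Option.typemap.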

def mail_admins(self, subject, body, fail_silently=False,
sender=None, to=None, host=None, port=None,
user=None, password=None, timeout=None,
use_ssl=False, use_tls=False, charset='utf-8'):
message = self.mail.Message(sender=sender, to=to,
subject=safe_str(subject),
body=safe_str(body),
charset=charset)
mailer = self.mail.Mailer(host=host, port=port,
user=user, password=password,
timeout=timeout, use_ssl=use_ssl,
use_tls=use_tls)
mailer.send(message, fail_silently=fail_silently)

def read_configuration(self, env='CELERY_CONFIG_MODULE'):
try:
custom_config = os.environ[env]
except KeyError:
pass
else:
if custom_config:
usercfg = self._import_config_module(custom_config)
return DictAttribute(usercfg)
return {}

def autodiscover_tasks(self, packages, related_name='tasks'):
self.task_modules.update(
mod.__name__ for mod in autodiscover_tasks(packages or (),
related_name) if mod)

@property
def conf(self):
"""Loader configuration."""
if self._conf is None:
self._conf = self.read_configuration()
return self._conf

@cached_property
def mail(self):
return self.import_module('celery.utils.mail')


def autodiscover_tasks(packages, related_name='tasks'):
global _RACE_PROTECTION

if _RACE_PROTECTION:
return ()
_RACE_PROTECTION = True
try:
return [find_related_module(pkg, related_name) for pkg in packages]
finally:
_RACE_PROTECTION = False


def find_related_module(package, related_name):
"""Given a package name and a module name, tries to find that
module."""

    # Django 1.7 allows specifying a class name in INSTALLED_APPS.
# (Issue #2248).
try:
importlib.import_module(package)
except ImportError:
package, _, _ = package.rpartition('.')

try:
pkg_path = importlib.import_module(package).__path__
except AttributeError:
return

try:
_imp.find_module(related_name, pkg_path)
except ImportError:
return

return importlib.import_module('{0}.{1}'.format(package, related_name))

+ 0    - 52    thesisenv/lib/python3.6/site-packages/celery/loaders/default.py

# -*- coding: utf-8 -*-
"""
celery.loaders.default
~~~~~~~~~~~~~~~~~~~~~~

The default loader used when no custom app has been initialized.

"""
from __future__ import absolute_import

import os
import warnings

from celery.datastructures import DictAttribute
from celery.exceptions import NotConfigured
from celery.utils import strtobool

from .base import BaseLoader

__all__ = ['Loader', 'DEFAULT_CONFIG_MODULE']

DEFAULT_CONFIG_MODULE = 'celeryconfig'

#: Warn about a missing configuration file only if :envvar:`C_WNOCONF` is set.
C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False))


class Loader(BaseLoader):
"""The loader used by the default app."""

def setup_settings(self, settingsdict):
return DictAttribute(settingsdict)

def read_configuration(self, fail_silently=True):
"""Read configuration from :file:`celeryconfig.py` and configure
celery and Django so it can be used by regular Python."""
configname = os.environ.get('CELERY_CONFIG_MODULE',
DEFAULT_CONFIG_MODULE)
try:
usercfg = self._import_config_module(configname)
except ImportError:
if not fail_silently:
raise
# billiard sets this if forked using execv
if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'):
warnings.warn(NotConfigured(
'No {module} module found! Please make sure it exists and '
'is available to Python.'.format(module=configname)))
return self.setup_settings({})
else:
self.configured = True
return self.setup_settings(usercfg)
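
A minimal celeryconfig.py that this loader would pick up from the current working directory; the values are illustrative:

    # celeryconfig.py
    BROKER_URL = 'amqp://guest@localhost//'
    CELERY_RESULT_BACKEND = 'rpc://'
    CELERY_IMPORTS = ('proj.tasks', )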

+ 0    - 373    thesisenv/lib/python3.6/site-packages/celery/local.py

# -*- coding: utf-8 -*-
"""
celery.local
~~~~~~~~~~~~

This module contains critical utilities that
need to be loaded as soon as possible, and that
must not load any third-party modules.

Parts of this module are Copyright by the Werkzeug Team.

"""
from __future__ import absolute_import

import importlib
import sys

from .five import string

__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate']

__module__ = __name__ # used by Proxy class body

PY3 = sys.version_info[0] == 3


def _default_cls_attr(name, type_, cls_value):
# Proxy uses properties to forward the standard
# class attributes __module__, __name__ and __doc__ to the real
    # object, but these need to be strings when accessed from
# the Proxy class directly. This is a hack to make that work.
# -- See Issue #1087.

def __new__(cls, getter):
instance = type_.__new__(cls, cls_value)
instance.__getter = getter
return instance

def __get__(self, obj, cls=None):
return self.__getter(obj) if obj is not None else self

return type(name, (type_, ), {
'__new__': __new__, '__get__': __get__,
})


def try_import(module, default=None):
"""Try to import and return module, or return
None if the module does not exist."""
try:
return importlib.import_module(module)
except ImportError:
return default


class Proxy(object):
"""Proxy to another object."""

# Code stolen from werkzeug.local.Proxy.
__slots__ = ('__local', '__args', '__kwargs', '__dict__')

def __init__(self, local,
args=None, kwargs=None, name=None, __doc__=None):
object.__setattr__(self, '_Proxy__local', local)
object.__setattr__(self, '_Proxy__args', args or ())
object.__setattr__(self, '_Proxy__kwargs', kwargs or {})
if name is not None:
object.__setattr__(self, '__custom_name__', name)
if __doc__ is not None:
object.__setattr__(self, '__doc__', __doc__)

@_default_cls_attr('name', str, __name__)
def __name__(self):
try:
return self.__custom_name__
except AttributeError:
return self._get_current_object().__name__

@_default_cls_attr('module', str, __module__)
def __module__(self):
return self._get_current_object().__module__

@_default_cls_attr('doc', str, __doc__)
def __doc__(self):
return self._get_current_object().__doc__

def _get_class(self):
return self._get_current_object().__class__

@property
def __class__(self):
return self._get_class()

def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
loc = object.__getattribute__(self, '_Proxy__local')
if not hasattr(loc, '__release_local__'):
return loc(*self.__args, **self.__kwargs)
try:
return getattr(loc, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to {0.__name__}'.format(self))

@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError: # pragma: no cover
raise AttributeError('__dict__')

def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError: # pragma: no cover
return '<{0} unbound>'.format(self.__class__.__name__)
return repr(obj)

def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError: # pragma: no cover
return False
__nonzero__ = __bool__ # Py2

def __unicode__(self):
try:
return string(self._get_current_object())
except RuntimeError: # pragma: no cover
return repr(self)

def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError: # pragma: no cover
return []

def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)

def __setitem__(self, key, value):
self._get_current_object()[key] = value

def __delitem__(self, key):
del self._get_current_object()[key]

def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq

def __delslice__(self, i, j):
del self._get_current_object()[i:j]

def __setattr__(self, name, value):
setattr(self._get_current_object(), name, value)

def __delattr__(self, name):
delattr(self._get_current_object(), name)

def __str__(self):
return str(self._get_current_object())

def __lt__(self, other):
return self._get_current_object() < other

def __le__(self, other):
return self._get_current_object() <= other

def __eq__(self, other):
return self._get_current_object() == other

def __ne__(self, other):
return self._get_current_object() != other

def __gt__(self, other):
return self._get_current_object() > other

def __ge__(self, other):
return self._get_current_object() >= other

def __hash__(self):
return hash(self._get_current_object())

def __call__(self, *a, **kw):
return self._get_current_object()(*a, **kw)

def __len__(self):
return len(self._get_current_object())

def __getitem__(self, i):
return self._get_current_object()[i]

def __iter__(self):
return iter(self._get_current_object())

def __contains__(self, i):
return i in self._get_current_object()

def __getslice__(self, i, j):
return self._get_current_object()[i:j]

def __add__(self, other):
return self._get_current_object() + other

def __sub__(self, other):
return self._get_current_object() - other

def __mul__(self, other):
return self._get_current_object() * other

def __floordiv__(self, other):
return self._get_current_object() // other

def __mod__(self, other):
return self._get_current_object() % other

def __divmod__(self, other):
return self._get_current_object().__divmod__(other)

def __pow__(self, other):
return self._get_current_object() ** other

def __lshift__(self, other):
return self._get_current_object() << other

def __rshift__(self, other):
return self._get_current_object() >> other

def __and__(self, other):
return self._get_current_object() & other

def __xor__(self, other):
return self._get_current_object() ^ other

def __or__(self, other):
return self._get_current_object() | other

def __div__(self, other):
return self._get_current_object().__div__(other)

def __truediv__(self, other):
return self._get_current_object().__truediv__(other)

def __neg__(self):
return -(self._get_current_object())

def __pos__(self):
return +(self._get_current_object())

def __abs__(self):
return abs(self._get_current_object())

def __invert__(self):
return ~(self._get_current_object())

def __complex__(self):
return complex(self._get_current_object())

def __int__(self):
return int(self._get_current_object())

def __float__(self):
return float(self._get_current_object())

def __oct__(self):
return oct(self._get_current_object())

def __hex__(self):
return hex(self._get_current_object())

def __index__(self):
return self._get_current_object().__index__()

def __coerce__(self, other):
return self._get_current_object().__coerce__(other)

def __enter__(self):
return self._get_current_object().__enter__()

def __exit__(self, *a, **kw):
return self._get_current_object().__exit__(*a, **kw)

def __reduce__(self):
return self._get_current_object().__reduce__()

if not PY3:
def __cmp__(self, other):
return cmp(self._get_current_object(), other) # noqa

def __long__(self):
return long(self._get_current_object()) # noqa


class PromiseProxy(Proxy):
"""This is a proxy to an object that has not yet been evaulated.

:class:`Proxy` will evaluate the object each time, while the
promise will only evaluate it once.

"""

__slots__ = ('__pending__', )

def _get_current_object(self):
try:
return object.__getattribute__(self, '__thing')
except AttributeError:
return self.__evaluate__()

def __then__(self, fun, *args, **kwargs):
if self.__evaluated__():
return fun(*args, **kwargs)
from collections import deque
try:
pending = object.__getattribute__(self, '__pending__')
except AttributeError:
pending = None
if pending is None:
pending = deque()
object.__setattr__(self, '__pending__', pending)
pending.append((fun, args, kwargs))

def __evaluated__(self):
try:
object.__getattribute__(self, '__thing')
except AttributeError:
return False
return True

def __maybe_evaluate__(self):
return self._get_current_object()

def __evaluate__(self,
_clean=('_Proxy__local',
'_Proxy__args',
'_Proxy__kwargs')):
try:
thing = Proxy._get_current_object(self)
except:
raise
else:
object.__setattr__(self, '__thing', thing)
for attr in _clean:
try:
object.__delattr__(self, attr)
except AttributeError: # pragma: no cover
# May mask errors so ignore
pass
try:
pending = object.__getattribute__(self, '__pending__')
except AttributeError:
pass
else:
try:
while pending:
fun, args, kwargs = pending.popleft()
fun(*args, **kwargs)
finally:
try:
object.__delattr__(self, '__pending__')
except AttributeError:
pass
return thing


def maybe_evaluate(obj):
try:
return obj.__maybe_evaluate__()
except AttributeError:
return obj
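
# Illustrative usage sketch (not part of the original module); the factory
# below is hypothetical.  A Proxy re-evaluates its factory on every access,
# while a PromiseProxy caches the first result.
from celery.local import Proxy, PromiseProxy

calls = []

def factory():
    calls.append(1)
    return 42

p = Proxy(factory)
assert int(p) == 42 and int(p) == 42        # factory called on each access
assert len(calls) == 2

lazy = PromiseProxy(factory)
assert int(lazy) == 42 and int(lazy) == 42  # factory called only once more
assert len(calls) == 3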

+ 0 - 813 thesisenv/lib/python3.6/site-packages/celery/platforms.py

# -*- coding: utf-8 -*-
"""
celery.platforms
~~~~~~~~~~~~~~~~

Utilities dealing with platform specifics: signals, daemonization,
users, groups, and so on.

"""
from __future__ import absolute_import, print_function

import atexit
import errno
import math
import numbers
import os
import platform as _platform
import signal as _signal
import sys
import warnings

from collections import namedtuple

from billiard import current_process
# fileno used to be in this module
from kombu.utils import maybe_fileno
from kombu.utils.compat import get_errno
from kombu.utils.encoding import safe_str
from contextlib import contextmanager

from .local import try_import
from .five import items, range, reraise, string_t, zip_longest
from .utils.functional import uniq

_setproctitle = try_import('setproctitle')
resource = try_import('resource')
pwd = try_import('pwd')
grp = try_import('grp')
mputil = try_import('multiprocessing.util')

__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed',
'get_fdmax', 'Pidfile', 'create_pidlock',
'close_open_fds', 'DaemonContext', 'detached', 'parse_uid',
'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid',
'maybe_drop_privileges', 'signals', 'set_process_title',
'set_mp_process_title', 'get_errno_name', 'ignore_errno',
'fd_by_path']

# exitcodes
EX_OK = getattr(os, 'EX_OK', 0)
EX_FAILURE = 1
EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
EX_USAGE = getattr(os, 'EX_USAGE', 64)
EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)

SYSTEM = _platform.system()
IS_OSX = SYSTEM == 'Darwin'
IS_WINDOWS = SYSTEM == 'Windows'

DAEMON_WORKDIR = '/'

PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))

PIDLOCKED = """ERROR: Pidfile ({0}) already exists.
Seems we're already running? (pid: {1})"""

_range = namedtuple('_range', ('start', 'stop'))

C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False)

ROOT_DISALLOWED = """\
Running a worker with superuser privileges when the
worker accepts messages serialized with pickle is a very bad idea!

If you really want to continue then you have to set the C_FORCE_ROOT
environment variable (but please think about this before you do).

User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""

ROOT_DISCOURAGED = """\
You are running the worker with superuser privileges, which is
absolutely not recommended!

Please specify a different user using the -u option.

User information: uid={uid} euid={euid} gid={gid} egid={egid}
"""


def pyimplementation():
"""Return string identifying the current Python implementation."""
if hasattr(_platform, 'python_implementation'):
return _platform.python_implementation()
elif sys.platform.startswith('java'):
return 'Jython ' + sys.platform
elif hasattr(sys, 'pypy_version_info'):
v = '.'.join(str(p) for p in sys.pypy_version_info[:3])
if sys.pypy_version_info[3:]:
v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:])
return 'PyPy ' + v
else:
return 'CPython'


class LockFailed(Exception):
"""Raised if a pidlock can't be acquired."""


def get_fdmax(default=None):
"""Return the maximum number of open file descriptors
on this system.

:keyword default: Value returned if there's no file
descriptor limit.

"""
try:
return os.sysconf('SC_OPEN_MAX')
except:
pass
if resource is None: # Windows
return default
fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if fdmax == resource.RLIM_INFINITY:
return default
return fdmax
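
# Illustrative usage sketch (not part of the original module); the default
# is only used when no file descriptor limit can be determined.
from celery.platforms import get_fdmax

print('max open file descriptors:', get_fdmax(default=2048))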


class Pidfile(object):
"""Pidfile

This is the type returned by :func:`create_pidlock`.

TIP: Use the :func:`create_pidlock` function instead,
which is more convenient and also removes stale pidfiles (when
the process holding the lock is no longer running).

"""

#: Path to the pid lock file.
path = None

def __init__(self, path):
self.path = os.path.abspath(path)

def acquire(self):
"""Acquire lock."""
try:
self.write_pid()
except OSError as exc:
reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2])
return self
__enter__ = acquire

def is_locked(self):
"""Return true if the pid lock exists."""
return os.path.exists(self.path)

def release(self, *args):
"""Release lock."""
self.remove()
__exit__ = release

def read_pid(self):
"""Read and return the current pid."""
with ignore_errno('ENOENT'):
with open(self.path, 'r') as fh:
line = fh.readline()
if line.strip() == line: # must contain '\n'
raise ValueError(
'Partial or invalid pidfile {0.path}'.format(self))

try:
return int(line.strip())
except ValueError:
raise ValueError(
'pidfile {0.path} contents invalid.'.format(self))

def remove(self):
"""Remove the lock."""
with ignore_errno(errno.ENOENT, errno.EACCES):
os.unlink(self.path)

def remove_if_stale(self):
"""Remove the lock if the process is not running.
(does not respond to signals)."""
try:
pid = self.read_pid()
except ValueError as exc:
print('Broken pidfile found. Removing it.', file=sys.stderr)
self.remove()
return True
if not pid:
self.remove()
return True

try:
os.kill(pid, 0)
except os.error as exc:
if exc.errno == errno.ESRCH:
print('Stale pidfile exists. Removing it.', file=sys.stderr)
self.remove()
return True
return False

def write_pid(self):
pid = os.getpid()
content = '{0}\n'.format(pid)

pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
pidfile = os.fdopen(pidfile_fd, 'w')
try:
pidfile.write(content)
# flush and sync so that the re-read below works.
pidfile.flush()
try:
os.fsync(pidfile_fd)
except AttributeError: # pragma: no cover
pass
finally:
pidfile.close()

rfh = open(self.path)
try:
if rfh.read() != content:
raise LockFailed(
"Inconsistency: Pidfile content doesn't match at re-read")
finally:
rfh.close()
PIDFile = Pidfile # compat alias


def create_pidlock(pidfile):
"""Create and verify pidfile.

If the pidfile already exists the program exits with an error message;
however, if the process it refers to is not running anymore, the pidfile
is deleted and the program continues.

This function will automatically install an :mod:`atexit` handler
to release the lock at exit; you can skip this by calling
:func:`_create_pidlock` instead.

:returns: :class:`Pidfile`.

**Example**:

.. code-block:: python

pidlock = create_pidlock('/var/run/app.pid')

"""
pidlock = _create_pidlock(pidfile)
atexit.register(pidlock.release)
return pidlock


def _create_pidlock(pidfile):
pidlock = Pidfile(pidfile)
if pidlock.is_locked() and not pidlock.remove_if_stale():
print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
raise SystemExit(EX_CANTCREAT)
pidlock.acquire()
return pidlock


def fd_by_path(paths):
"""Return a list of fds.

This method returns a list of file descriptors corresponding to
the file paths passed in the ``paths`` argument.

:keyword paths: List of file paths to get fds for.

:returns: list of file descriptors.

**Example**:

.. code-block:: python

keep = fd_by_path(['/dev/urandom',
'/my/precious/'])
"""
stats = set()
for path in paths:
try:
fd = os.open(path, os.O_RDONLY)
except OSError:
continue
try:
stats.add(os.fstat(fd)[1:3])
finally:
os.close(fd)

def fd_in_stats(fd):
try:
return os.fstat(fd)[1:3] in stats
except OSError:
return False

return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)]


if hasattr(os, 'closerange'):

def close_open_fds(keep=None):
# must make sure this is 0-inclusive (Issue #1882)
keep = list(uniq(sorted(
f for f in map(maybe_fileno, keep or []) if f is not None
)))
maxfd = get_fdmax(default=2048)
kL, kH = iter([-1] + keep), iter(keep + [maxfd])
for low, high in zip_longest(kL, kH):
if low + 1 != high:
os.closerange(low + 1, high)

else:

def close_open_fds(keep=None): # noqa
keep = [maybe_fileno(f)
for f in (keep or []) if maybe_fileno(f) is not None]
for fd in reversed(range(get_fdmax(default=2048))):
if fd not in keep:
with ignore_errno(errno.EBADF):
os.close(fd)


class DaemonContext(object):
_is_open = False

def __init__(self, pidfile=None, workdir=None, umask=None,
fake=False, after_chdir=None, after_forkers=True,
**kwargs):
if isinstance(umask, string_t):
# octal or decimal, depending on initial zero.
umask = int(umask, 8 if umask.startswith('0') else 10)
self.workdir = workdir or DAEMON_WORKDIR
self.umask = umask
self.fake = fake
self.after_chdir = after_chdir
self.after_forkers = after_forkers
self.stdfds = (sys.stdin, sys.stdout, sys.stderr)

def redirect_to_null(self, fd):
if fd is not None:
dest = os.open(os.devnull, os.O_RDWR)
os.dup2(dest, fd)

def open(self):
if not self._is_open:
if not self.fake:
self._detach()

os.chdir(self.workdir)
if self.umask is not None:
os.umask(self.umask)

if self.after_chdir:
self.after_chdir()

if not self.fake:
# We need to keep /dev/urandom from closing because
# shelve needs it, and Beat needs shelve to start.
keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])
close_open_fds(keep)
for fd in self.stdfds:
self.redirect_to_null(maybe_fileno(fd))
if self.after_forkers and mputil is not None:
mputil._run_after_forkers()

self._is_open = True
__enter__ = open

def close(self, *args):
if self._is_open:
self._is_open = False
__exit__ = close

def _detach(self):
if os.fork() == 0: # first child
os.setsid() # create new session
if os.fork() > 0: # second child
os._exit(0)
else:
os._exit(0)
return self


def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
workdir=None, fake=False, **opts):
"""Detach the current process in the background (daemonize).

:keyword logfile: Optional log file. The ability to write to this file
will be verified before the process is detached.
:keyword pidfile: Optional pidfile. The pidfile will not be created,
as this is the responsibility of the child. But the process will
exit if the pid lock exists and the process it refers to is still running.
:keyword uid: Optional user id or user name to change
effective privileges to.
:keyword gid: Optional group id or group name to change effective
privileges to.
:keyword umask: Optional umask that will be effective in the child process.
:keyword workdir: Optional new working directory.
:keyword fake: Don't actually detach, intended for debugging purposes.
:keyword \*\*opts: Ignored.

**Example**:

.. code-block:: python

from celery.platforms import detached, create_pidlock

with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid',
uid='nobody'):
# Now in detached child process with effective user set to nobody,
# and we know that our logfile can be written to, and that
# the pidfile is not locked.
pidlock = create_pidlock('/var/run/app.pid')

# Run the program
program.run(logfile='/var/log/app.log')

"""

if not resource:
raise RuntimeError('This platform does not support detach.')
workdir = os.getcwd() if workdir is None else workdir

signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.
maybe_drop_privileges(uid=uid, gid=gid)

def after_chdir_do():
# Since without stderr any errors will be silently suppressed,
# we need to know that we have access to the logfile.
logfile and open(logfile, 'a').close()
# Doesn't actually create the pidfile, but makes sure it's not stale.
if pidfile:
_create_pidlock(pidfile).release()

return DaemonContext(
umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
)


def parse_uid(uid):
"""Parse user id.

uid can be an integer (uid) or a string (user name); if a user name,
the uid is taken from the system user registry.

"""
try:
return int(uid)
except ValueError:
try:
return pwd.getpwnam(uid).pw_uid
except (AttributeError, KeyError):
raise KeyError('User does not exist: {0}'.format(uid))


def parse_gid(gid):
"""Parse group id.

gid can be an integer (gid) or a string (group name); if a group name,
the gid is taken from the system group registry.

"""
try:
return int(gid)
except ValueError:
try:
return grp.getgrnam(gid).gr_gid
except (AttributeError, KeyError):
raise KeyError('Group does not exist: {0}'.format(gid))
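
# Illustrative usage sketch (not part of the original module); the missing
# user name below is hypothetical.  Numeric values pass straight through,
# names are looked up in the system user/group databases.
from celery.platforms import parse_uid, parse_gid

assert parse_uid('0') == 0
assert parse_gid(0) == 0
try:
    parse_uid('hypothetical-missing-user')
except KeyError as exc:
    print(exc)            # 'User does not exist: ...'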


def _setgroups_hack(groups):
""":fun:`setgroups` may have a platform-dependent limit,
and it is not always possible to know in advance what this limit
is, so we use this ugly hack stolen from glibc."""
groups = groups[:]

while 1:
try:
return os.setgroups(groups)
except ValueError: # error from Python's check.
if len(groups) <= 1:
raise
groups[:] = groups[:-1]
except OSError as exc: # error from the OS.
if exc.errno != errno.EINVAL or len(groups) <= 1:
raise
groups[:] = groups[:-1]


def setgroups(groups):
"""Set active groups from a list of group ids."""
max_groups = None
try:
max_groups = os.sysconf('SC_NGROUPS_MAX')
except Exception:
pass
try:
return _setgroups_hack(groups[:max_groups])
except OSError as exc:
if exc.errno != errno.EPERM:
raise
if any(group not in groups for group in os.getgroups()):
# we shouldn't be allowed to change to this group.
raise


def initgroups(uid, gid):
"""Compat version of :func:`os.initgroups` which was first
added to Python 2.7."""
if not pwd: # pragma: no cover
return
username = pwd.getpwuid(uid)[0]
if hasattr(os, 'initgroups'): # Python 2.7+
return os.initgroups(username, gid)
groups = [gr.gr_gid for gr in grp.getgrall()
if username in gr.gr_mem]
setgroups(groups)


def setgid(gid):
"""Version of :func:`os.setgid` supporting group names."""
os.setgid(parse_gid(gid))


def setuid(uid):
"""Version of :func:`os.setuid` supporting usernames."""
os.setuid(parse_uid(uid))


def maybe_drop_privileges(uid=None, gid=None):
"""Change process privileges to new user/group.

If both UID and GID are specified, the real user/group is changed.

If only UID is specified, the real user is changed, and the group is
changed to the user's primary group.

If only GID is specified, only the group is changed.

"""
if sys.platform == 'win32':
return
if os.geteuid():
# no point trying to setuid unless we're root.
if not os.getuid():
raise AssertionError('contact support')
uid = uid and parse_uid(uid)
gid = gid and parse_gid(gid)

if uid:
# If GID isn't defined, get the primary GID of the user.
if not gid and pwd:
gid = pwd.getpwuid(uid).pw_gid
# Must set the GID before initgroups(), as setgid()
# is known to zap the group list on some platforms.

# setgid must happen before setuid (otherwise the setgid operation
# may fail because of insufficient privileges and possibly stay
# in a privileged group).
setgid(gid)
initgroups(uid, gid)

# at last:
setuid(uid)
# ... and make sure privileges cannot be restored:
try:
setuid(0)
except OSError as exc:
if get_errno(exc) != errno.EPERM:
raise
pass # Good: cannot restore privileges.
else:
raise RuntimeError(
'non-root user able to restore privileges after setuid.')
else:
gid and setgid(gid)

if uid and (not os.getuid()) and not (os.geteuid()):
raise AssertionError('Still root uid after drop privileges!')
if gid and (not os.getgid()) and not (os.getegid()):
raise AssertionError('Still root gid after drop privileges!')


class Signals(object):
"""Convenience interface to :mod:`signals`.

If the requested signal is not supported on the current platform,
the operation will be ignored.

**Examples**:

.. code-block:: python

>>> from celery.platforms import signals

>>> from proj.handlers import my_handler
>>> signals['INT'] = my_handler

>>> signals['INT']
my_handler

>>> signals.supported('INT')
True

>>> signals.signum('INT')
2

>>> signals.ignore('USR1')
>>> signals['USR1'] == signals.ignored
True

>>> signals.reset('USR1')
>>> signals['USR1'] == signals.default
True

>>> from proj.handlers import exit_handler, hup_handler
>>> signals.update(INT=exit_handler,
... TERM=exit_handler,
... HUP=hup_handler)

"""

ignored = _signal.SIG_IGN
default = _signal.SIG_DFL

if hasattr(_signal, 'setitimer'):

def arm_alarm(self, seconds):
_signal.setitimer(_signal.ITIMER_REAL, seconds)
else: # pragma: no cover
try:
from itimer import alarm as _itimer_alarm # noqa
except ImportError:

def arm_alarm(self, seconds): # noqa
_signal.alarm(math.ceil(seconds))
else: # pragma: no cover

def arm_alarm(self, seconds): # noqa
return _itimer_alarm(seconds) # noqa

def reset_alarm(self):
return _signal.alarm(0)

def supported(self, signal_name):
"""Return true value if ``signal_name`` exists on this platform."""
try:
return self.signum(signal_name)
except AttributeError:
pass

def signum(self, signal_name):
"""Get signal number from signal name."""
if isinstance(signal_name, numbers.Integral):
return signal_name
if not isinstance(signal_name, string_t) \
or not signal_name.isupper():
raise TypeError('signal name must be uppercase string.')
if not signal_name.startswith('SIG'):
signal_name = 'SIG' + signal_name
return getattr(_signal, signal_name)

def reset(self, *signal_names):
"""Reset signals to the default signal handler.

Does nothing if the platform doesn't support signals,
or the specified signal in particular.

"""
self.update((sig, self.default) for sig in signal_names)

def ignore(self, *signal_names):
"""Ignore signal using :const:`SIG_IGN`.

Does nothing if the platform doesn't support signals,
or the specified signal in particular.

"""
self.update((sig, self.ignored) for sig in signal_names)

def __getitem__(self, signal_name):
return _signal.getsignal(self.signum(signal_name))

def __setitem__(self, signal_name, handler):
"""Install signal handler.

Does nothing if the current platform doesn't support signals,
or the specified signal in particular.

"""
try:
_signal.signal(self.signum(signal_name), handler)
except (AttributeError, ValueError):
pass

def update(self, _d_=None, **sigmap):
"""Set signal handlers from a mapping."""
for signal_name, handler in items(dict(_d_ or {}, **sigmap)):
self[signal_name] = handler

signals = Signals()
get_signal = signals.signum # compat
install_signal_handler = signals.__setitem__ # compat
reset_signal = signals.reset # compat
ignore_signal = signals.ignore # compat


def strargv(argv):
arg_start = 2 if 'manage' in argv[0] else 1
if len(argv) > arg_start:
return ' '.join(argv[arg_start:])
return ''
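
# Illustrative usage sketch (not part of the original module); strargv is
# used when building the worker's process title.  When argv[0] contains
# 'manage' (as with Django's manage.py) the management command is skipped.
from celery.platforms import strargv

assert strargv(['celery', 'worker', '-l', 'info']) == 'worker -l info'
assert strargv(['manage.py', 'celery', 'worker']) == 'worker'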


def set_process_title(progname, info=None):
"""Set the ps name for the currently running process.

Only works if :mod:`setproctitle` is installed.

"""
proctitle = '[{0}]'.format(progname)
proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle
if _setproctitle:
_setproctitle.setproctitle(safe_str(proctitle))
return proctitle
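
# Illustrative usage sketch (not part of the original module); the real
# process title only changes when the optional setproctitle package is
# installed, but the formatted title is always returned.
from celery.platforms import set_process_title

print(set_process_title('celeryd', info='worker -l info'))
# -> '[celeryd] worker -l info'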


if os.environ.get('NOSETPS'): # pragma: no cover

def set_mp_process_title(*a, **k):
pass
else:

def set_mp_process_title(progname, info=None, hostname=None): # noqa
"""Set the ps name using the multiprocessing process name.

Only works if :mod:`setproctitle` is installed.

"""
if hostname:
progname = '{0}: {1}'.format(progname, hostname)
return set_process_title(
'{0}:{1}'.format(progname, current_process().name), info=info)


def get_errno_name(n):
"""Get errno for string, e.g. ``ENOENT``."""
if isinstance(n, string_t):
return getattr(errno, n)
return n


@contextmanager
def ignore_errno(*errnos, **kwargs):
"""Context manager to ignore specific POSIX error codes.

Takes a list of error codes to ignore, which can be either
the name of the code, or the code integer itself::

>>> with ignore_errno('ENOENT'):
... with open('foo', 'r') as fh:
... return fh.read()

>>> with ignore_errno(errno.ENOENT, errno.EPERM):
... pass

:keyword types: A tuple of exceptions to ignore (when the errno matches),
defaults to :exc:`Exception`.
"""
types = kwargs.get('types') or (Exception, )
errnos = [get_errno_name(errno) for errno in errnos]
try:
yield
except types as exc:
if not hasattr(exc, 'errno'):
raise
if exc.errno not in errnos:
raise


def check_privileges(accept_content):
uid = os.getuid() if hasattr(os, 'getuid') else 65535
gid = os.getgid() if hasattr(os, 'getgid') else 65535
euid = os.geteuid() if hasattr(os, 'geteuid') else 65535
egid = os.getegid() if hasattr(os, 'getegid') else 65535

if hasattr(os, 'fchown'):
if not all(hasattr(os, attr)
for attr in ['getuid', 'getgid', 'geteuid', 'getegid']):
raise AssertionError('suspicious platform, contact support')

if not uid or not gid or not euid or not egid:
if ('pickle' in accept_content or
'application/x-python-serialize' in accept_content):
if not C_FORCE_ROOT:
try:
print(ROOT_DISALLOWED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
), file=sys.stderr)
finally:
os._exit(1)
warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
)))

+ 0 - 925 thesisenv/lib/python3.6/site-packages/celery/result.py

# -*- coding: utf-8 -*-
"""
celery.result
~~~~~~~~~~~~~

Task results/state and groups of results.

"""
from __future__ import absolute_import

import time
import warnings

from collections import deque
from contextlib import contextmanager
from copy import copy

from kombu.utils import cached_property
from kombu.utils.compat import OrderedDict

from . import current_app
from . import states
from ._state import _set_task_join_will_block, task_join_will_block
from .app import app_or_default
from .datastructures import DependencyGraph, GraphFormatter
from .exceptions import IncompleteStream, TimeoutError
from .five import items, range, string_t, monotonic
from .utils import deprecated

__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult',
'EagerResult', 'result_from_tuple']

E_WOULDBLOCK = """\
Never call result.get() within a task!
See http://docs.celeryq.org/en/latest/userguide/tasks.html\
#task-synchronous-subtasks

In Celery 3.2 this will result in an exception being
raised instead of just being a warning.
"""


def assert_will_not_block():
if task_join_will_block():
warnings.warn(RuntimeWarning(E_WOULDBLOCK))


@contextmanager
def allow_join_result():
reset_value = task_join_will_block()
_set_task_join_will_block(False)
try:
yield
finally:
_set_task_join_will_block(reset_value)


class ResultBase(object):
"""Base class for all results"""

#: Parent result (if part of a chain)
parent = None


class AsyncResult(ResultBase):
"""Query task state.

:param id: see :attr:`id`.
:keyword backend: see :attr:`backend`.

"""
app = None

#: Error raised for timeouts.
TimeoutError = TimeoutError

#: The task's UUID.
id = None

#: The task result backend to use.
backend = None

def __init__(self, id, backend=None, task_name=None,
app=None, parent=None):
self.app = app_or_default(app or self.app)
self.id = id
self.backend = backend or self.app.backend
self.task_name = task_name
self.parent = parent
self._cache = None

def as_tuple(self):
parent = self.parent
return (self.id, parent and parent.as_tuple()), None
serializable = as_tuple # XXX compat

def forget(self):
"""Forget about (and possibly remove the result of) this task."""
self._cache = None
self.backend.forget(self.id)

def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers.

Any worker receiving the task, or having reserved the
task, *must* ignore it.

:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from workers. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.

"""
self.app.control.revoke(self.id, connection=connection,
terminate=terminate, signal=signal,
reply=wait, timeout=timeout)

def get(self, timeout=None, propagate=True, interval=0.5,
no_ack=True, follow_parents=True,
EXCEPTION_STATES=states.EXCEPTION_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES):
"""Wait until task is ready, and return its result.

.. warning::

Waiting for tasks within a task may lead to deadlocks.
Please read :ref:`task-synchronous-subtasks`.

:keyword timeout: How long to wait, in seconds, before the
operation times out.
:keyword propagate: Re-raise exception if the task failed.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve the result. Note that this does not have any effect
when using the amqp result store backend, as it does not
use polling.
:keyword no_ack: Enable amqp no ack (automatically acknowledge
message). If this is :const:`False` then the message will
**not be acked**.
:keyword follow_parents: Reraise any exception raised by parent task.

:raises celery.exceptions.TimeoutError: if `timeout` is not
:const:`None` and the result does not arrive within `timeout`
seconds.

If the remote call raised an exception then that exception will
be re-raised.

"""
assert_will_not_block()
on_interval = None
if follow_parents and propagate and self.parent:
on_interval = self._maybe_reraise_parent_error
on_interval()

if self._cache:
if propagate:
self.maybe_reraise()
return self.result

meta = self.backend.wait_for(
self.id, timeout=timeout,
interval=interval,
on_interval=on_interval,
no_ack=no_ack,
)
if meta:
self._maybe_set_cache(meta)
status = meta['status']
if status in PROPAGATE_STATES and propagate:
raise meta['result']
return meta['result']
wait = get # deprecated alias to :meth:`get`.

def _maybe_reraise_parent_error(self):
for node in reversed(list(self._parents())):
node.maybe_reraise()

def _parents(self):
node = self.parent
while node:
yield node
node = node.parent

def collect(self, intermediate=False, **kwargs):
"""Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.

An example would be having the following tasks:

.. code-block:: python

from celery import group
from proj.celery import app

@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()

@app.task(trail=True)
def B(i):
return pow2.delay(i)

@app.task(trail=True)
def pow2(i):
return i ** 2

Note that the ``trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.

Calling :meth:`collect` would return:

.. code-block:: python

>>> from celery.result import ResultBase
>>> from proj.tasks import A

>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

"""
for _, R in self.iterdeps(intermediate=intermediate):
yield R, R.get(**kwargs)

def get_leaf(self):
value = None
for _, R in self.iterdeps():
value = R.get()
return value

def iterdeps(self, intermediate=False):
stack = deque([(None, self)])

while stack:
parent, node = stack.popleft()
yield parent, node
if node.ready():
stack.extend((node, child) for child in node.children or [])
else:
if not intermediate:
raise IncompleteStream()

def ready(self):
"""Returns :const:`True` if the task has been executed.

If the task is still running, pending, or is waiting
for retry then :const:`False` is returned.

"""
return self.state in self.backend.READY_STATES

def successful(self):
"""Returns :const:`True` if the task executed successfully."""
return self.state == states.SUCCESS

def failed(self):
"""Returns :const:`True` if the task failed."""
return self.state == states.FAILURE

def maybe_reraise(self):
if self.state in states.PROPAGATE_STATES:
raise self.result

def build_graph(self, intermediate=False, formatter=None):
graph = DependencyGraph(
formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
)
for parent, node in self.iterdeps(intermediate=intermediate):
graph.add_arc(node)
if parent:
graph.add_edge(parent, node)
return graph

def __str__(self):
"""`str(self) -> self.id`"""
return str(self.id)

def __hash__(self):
"""`hash(self) -> hash(self.id)`"""
return hash(self.id)

def __repr__(self):
return '<{0}: {1}>'.format(type(self).__name__, self.id)

def __eq__(self, other):
if isinstance(other, AsyncResult):
return other.id == self.id
elif isinstance(other, string_t):
return other == self.id
return NotImplemented

def __ne__(self, other):
return not self.__eq__(other)

def __copy__(self):
return self.__class__(
self.id, self.backend, self.task_name, self.app, self.parent,
)

def __reduce__(self):
return self.__class__, self.__reduce_args__()

def __reduce_args__(self):
return self.id, self.backend, self.task_name, None, self.parent

def __del__(self):
self._cache = None

@cached_property
def graph(self):
return self.build_graph()

@property
def supports_native_join(self):
return self.backend.supports_native_join

@property
def children(self):
return self._get_task_meta().get('children')

def _maybe_set_cache(self, meta):
if meta:
state = meta['status']
if state == states.SUCCESS or state in states.PROPAGATE_STATES:
return self._set_cache(meta)
return meta

def _get_task_meta(self):
if self._cache is None:
return self._maybe_set_cache(self.backend.get_task_meta(self.id))
return self._cache

def _set_cache(self, d):
children = d.get('children')
if children:
d['children'] = [
result_from_tuple(child, self.app) for child in children
]
self._cache = d
return d

@property
def result(self):
"""When the task has been executed, this contains the return value.
If the task raised an exception, this will be the exception
instance."""
return self._get_task_meta()['result']
info = result

@property
def traceback(self):
"""Get the traceback of a failed task."""
return self._get_task_meta().get('traceback')

@property
def state(self):
"""The tasks current state.

Possible values includes:

*PENDING*

The task is waiting for execution.

*STARTED*

The task has been started.

*RETRY*

The task is to be retried, possibly because of failure.

*FAILURE*

The task raised an exception, or has exceeded the retry limit.
The :attr:`result` attribute then contains the
exception raised by the task.

*SUCCESS*

The task executed successfully. The :attr:`result` attribute
then contains the task's return value.

"""
return self._get_task_meta()['status']
status = state

@property
def task_id(self):
"""compat alias to :attr:`id`"""
return self.id

@task_id.setter # noqa
def task_id(self, id):
self.id = id
BaseAsyncResult = AsyncResult # for backwards compatibility.
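
# Illustrative usage sketch (not part of the original module).  Constructing
# an AsyncResult only needs a task id; calling get() or reading state
# additionally needs a configured result backend.
from celery.result import AsyncResult

res = AsyncResult('hypothetical-task-id')
print(res.id, repr(res))
# With a real app and a hypothetical task ``add`` the usual flow is:
#     result = add.delay(2, 2)
#     result.get(timeout=10)    # -> 4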


class ResultSet(ResultBase):
"""Working with more than one result.

:param results: List of result instances.

"""
app = None

#: List of results in the set.
results = None

def __init__(self, results, app=None, **kwargs):
self.app = app_or_default(app or self.app)
self.results = results

def add(self, result):
"""Add :class:`AsyncResult` as a new member of the set.

Does nothing if the result is already a member.

"""
if result not in self.results:
self.results.append(result)

def remove(self, result):
"""Remove result from the set; it must be a member.

:raises KeyError: if the result is not a member.

"""
if isinstance(result, string_t):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)

def discard(self, result):
"""Remove result from the set if it is a member.

If it is not a member, do nothing.

"""
try:
self.remove(result)
except KeyError:
pass

def update(self, results):
"""Update set with the union of itself and an iterable with
results."""
self.results.extend(r for r in results if r not in self.results)

def clear(self):
"""Remove all results from this set."""
self.results[:] = [] # don't create new list.

def successful(self):
"""Was all of the tasks successful?

:returns: :const:`True` if all of the tasks finished
successfully (i.e. did not raise an exception).

"""
return all(result.successful() for result in self.results)

def failed(self):
"""Did any of the tasks fail?

:returns: :const:`True` if any of the tasks failed
(i.e., raised an exception).

"""
return any(result.failed() for result in self.results)

def maybe_reraise(self):
for result in self.results:
result.maybe_reraise()

def waiting(self):
"""Are any of the tasks incomplete?

:returns: :const:`True` if any of the tasks is still
waiting for execution.

"""
return any(not result.ready() for result in self.results)

def ready(self):
"""Did all of the tasks complete? (either by success of failure).

:returns: :const:`True` if all of the tasks has been
executed.

"""
return all(result.ready() for result in self.results)

def completed_count(self):
"""Task completion count.

:returns: the number of tasks completed.

"""
return sum(int(result.successful()) for result in self.results)

def forget(self):
"""Forget about (and possible remove the result of) all the tasks."""
for result in self.results:
result.forget()

def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers for all tasks in the set.

:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from worker. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.

"""
self.app.control.revoke([r.id for r in self.results],
connection=connection, timeout=timeout,
terminate=terminate, signal=signal, reply=wait)

def __iter__(self):
return iter(self.results)

def __getitem__(self, index):
"""`res[i] -> res.results[i]`"""
return self.results[index]

@deprecated('3.2', '3.3')
def iterate(self, timeout=None, propagate=True, interval=0.5):
"""Deprecated method, use :meth:`get` with a callback argument."""
elapsed = 0.0
results = OrderedDict((result.id, copy(result))
for result in self.results)

while results:
removed = set()
for task_id, result in items(results):
if result.ready():
yield result.get(timeout=timeout and timeout - elapsed,
propagate=propagate)
removed.add(task_id)
else:
if result.backend.subpolling_interval:
time.sleep(result.backend.subpolling_interval)
for task_id in removed:
results.pop(task_id, None)
time.sleep(interval)
elapsed += interval
if timeout and elapsed >= timeout:
raise TimeoutError('The operation timed out')

def get(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""See :meth:`join`

This is here for API compatibility with :class:`AsyncResult`;
in addition it uses :meth:`join_native` if available for the
current result backend.

"""
return (self.join_native if self.supports_native_join else self.join)(
timeout=timeout, propagate=propagate,
interval=interval, callback=callback, no_ack=no_ack)

def join(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""Gathers the results of all tasks as a list in order.

.. note::

This can be an expensive operation for result store
backends that must resort to polling (e.g. database).

You should consider using :meth:`join_native` if your backend
supports it.

.. warning::

Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.

:keyword timeout: The number of seconds to wait for results before
the operation times out.

:keyword propagate: If any of the tasks raises an exception, the
exception will be re-raised.

:keyword interval: Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this
does not have any effect when using the amqp
result store backend, as it does not use polling.

:keyword callback: Optional callback to be called for every result
received. Must have signature ``(task_id, value)``.
No results will be returned by this function if
a callback is specified. The order of results
is also arbitrary when a callback is used.
To get access to the result object for a particular
id you will have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).

:keyword no_ack: Automatic message acknowledgement (Note that if this
is set to :const:`False` then the messages *will not be
acknowledged*).

:raises celery.exceptions.TimeoutError: if ``timeout`` is not
:const:`None` and the operation takes longer than ``timeout``
seconds.

"""
assert_will_not_block()
time_start = monotonic()
remaining = None

results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError('join operation timed out')
value = result.get(
timeout=remaining, propagate=propagate,
interval=interval, no_ack=no_ack,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results

def iter_native(self, timeout=None, interval=0.5, no_ack=True):
"""Backend optimized version of :meth:`iterate`.

.. versionadded:: 2.2

Note that this does not support collecting the results
for different task types using different backends.

This is currently only supported by the amqp, Redis and cache
result backends.

"""
results = self.results
if not results:
return iter([])
return self.backend.get_many(
set(r.id for r in results),
timeout=timeout, interval=interval, no_ack=no_ack,
)

def join_native(self, timeout=None, propagate=True,
interval=0.5, callback=None, no_ack=True):
"""Backend optimized version of :meth:`join`.

.. versionadded:: 2.2

Note that this does not support collecting the results
for different task types using different backends.

This is currently only supported by the amqp, Redis and cache
result backends.

"""
assert_will_not_block()
order_index = None if callback else dict(
(result.id, i) for i, result in enumerate(self.results)
)
acc = None if callback else [None for _ in range(len(self))]
for task_id, meta in self.iter_native(timeout, interval, no_ack):
value = meta['result']
if propagate and meta['status'] in states.PROPAGATE_STATES:
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc

def _failed_join_report(self):
return (res for res in self.results
if res.backend.is_cached(res.id) and
res.state in states.PROPAGATE_STATES)

def __len__(self):
return len(self.results)

def __eq__(self, other):
if isinstance(other, ResultSet):
return other.results == self.results
return NotImplemented

def __ne__(self, other):
return not self.__eq__(other)

def __repr__(self):
return '<{0}: [{1}]>'.format(type(self).__name__,
', '.join(r.id for r in self.results))

@property
def subtasks(self):
"""Deprecated alias to :attr:`results`."""
return self.results

@property
def supports_native_join(self):
try:
return self.results[0].supports_native_join
except IndexError:
pass

@property
def backend(self):
return self.app.backend if self.app else self.results[0].backend
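
# Illustrative usage sketch (not part of the original module); membership is
# id-based, so no broker or result backend is required for this example.
from celery.result import AsyncResult, ResultSet

a, b = AsyncResult('hypothetical-id-1'), AsyncResult('hypothetical-id-2')
rs = ResultSet([a])
rs.add(b)
rs.add(b)                        # already a member, ignored
assert len(rs) == 2
rs.discard('hypothetical-id-2')  # accepts an id or an AsyncResult
assert len(rs) == 1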


class GroupResult(ResultSet):
"""Like :class:`ResultSet`, but with an associated id.

This type is returned by :class:`~celery.group`, and by the
deprecated TaskSet's :meth:`~celery.task.TaskSet.apply_async` method.

It enables inspection of the tasks' states and return values
as a single entity.

:param id: The id of the group.
:param results: List of result instances.

"""

#: The UUID of the group.
id = None

#: List/iterator of results in the group
results = None

def __init__(self, id=None, results=None, **kwargs):
self.id = id
ResultSet.__init__(self, results, **kwargs)

def save(self, backend=None):
"""Save group-result for later retrieval using :meth:`restore`.

Example::

>>> def save_and_restore(result):
... result.save()
... result = GroupResult.restore(result.id)

"""
return (backend or self.app.backend).save_group(self.id, self)

def delete(self, backend=None):
"""Remove this result if it was previously saved."""
(backend or self.app.backend).delete_group(self.id)

def __reduce__(self):
return self.__class__, self.__reduce_args__()

def __reduce_args__(self):
return self.id, self.results

def __bool__(self):
return bool(self.id or self.results)
__nonzero__ = __bool__ # Included for Py2 backwards compatibility

def __eq__(self, other):
if isinstance(other, GroupResult):
return other.id == self.id and other.results == self.results
return NotImplemented

def __ne__(self, other):
return not self.__eq__(other)

def __repr__(self):
return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
', '.join(r.id for r in self.results))

def as_tuple(self):
return self.id, [r.as_tuple() for r in self.results]
serializable = as_tuple # XXX compat

@property
def children(self):
return self.results

@classmethod
def restore(self, id, backend=None):
"""Restore previously saved group result."""
return (
backend or (self.app.backend if self.app else current_app.backend)
).restore_group(id)


class TaskSetResult(GroupResult):
"""Deprecated version of :class:`GroupResult`"""

def __init__(self, taskset_id, results=None, **kwargs):
# XXX supports the taskset_id kwarg.
# XXX previously the "results" arg was named "subtasks".
if 'subtasks' in kwargs:
results = kwargs['subtasks']
GroupResult.__init__(self, taskset_id, results, **kwargs)

def itersubtasks(self):
"""Deprecated. Use ``iter(self.results)`` instead."""
return iter(self.results)

@property
def total(self):
"""Deprecated: Use ``len(r)``."""
return len(self)

@property
def taskset_id(self):
"""compat alias to :attr:`self.id`"""
return self.id

@taskset_id.setter # noqa
def taskset_id(self, id):
self.id = id


class EagerResult(AsyncResult):
"""Result that we know has already been executed."""
task_name = None

def __init__(self, id, ret_value, state, traceback=None):
self.id = id
self._result = ret_value
self._state = state
self._traceback = traceback

def _get_task_meta(self):
return {'task_id': self.id, 'result': self._result, 'status':
self._state, 'traceback': self._traceback}

def __reduce__(self):
return self.__class__, self.__reduce_args__()

def __reduce_args__(self):
return (self.id, self._result, self._state, self._traceback)

def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)

def ready(self):
return True

def get(self, timeout=None, propagate=True, **kwargs):
if self.successful():
return self.result
elif self.state in states.PROPAGATE_STATES:
if propagate:
raise self.result
return self.result
wait = get

def forget(self):
pass

def revoke(self, *args, **kwargs):
self._state = states.REVOKED

def __repr__(self):
return '<EagerResult: {0.id}>'.format(self)

@property
def result(self):
"""The tasks return value"""
return self._result

@property
def state(self):
"""The tasks state."""
return self._state
status = state

@property
def traceback(self):
"""The traceback if the task failed."""
return self._traceback

@property
def supports_native_join(self):
return False


def result_from_tuple(r, app=None):
# earlier backends may just pickle, so check if
# result is already prepared.
app = app_or_default(app)
Result = app.AsyncResult
if not isinstance(r, ResultBase):
res, nodes = r
if nodes:
return app.GroupResult(
res, [result_from_tuple(child, app) for child in nodes],
)
# previously did not include parent
id, parent = res if isinstance(res, (list, tuple)) else (res, None)
if parent:
parent = result_from_tuple(parent, app)
return Result(id, parent=parent)
return r
from_serializable = result_from_tuple # XXX compat
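
# Illustrative usage sketch (not part of the original module); EagerResult
# needs no broker, so it can demonstrate the result API and the tuple
# round-trip used for serialization.  The task id is hypothetical.
from celery import states
from celery.result import EagerResult, result_from_tuple

eager = EagerResult('hypothetical-id', 42, states.SUCCESS)
assert eager.successful() and eager.get() == 42

restored = result_from_tuple(eager.as_tuple())
assert restored.id == eager.id        # rebuilt as an AsyncResult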

+ 0 - 593 thesisenv/lib/python3.6/site-packages/celery/schedules.py

# -*- coding: utf-8 -*-
"""
celery.schedules
~~~~~~~~~~~~~~~~

Schedules define the intervals at which periodic tasks
should run.

"""
from __future__ import absolute_import

import numbers
import re

from collections import namedtuple
from datetime import datetime, timedelta

from kombu.utils import cached_property

from . import current_app
from .five import range, string_t
from .utils import is_iterable
from .utils.timeutils import (
timedelta_seconds, weekday, maybe_timedelta, remaining,
humanize_seconds, timezone, maybe_make_aware, ffwd
)
from .datastructures import AttributeDict

__all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser',
'maybe_schedule']

schedstate = namedtuple('schedstate', ('is_due', 'next'))


CRON_PATTERN_INVALID = """\
Invalid crontab pattern. Valid range is {min}-{max}. \
'{value}' was found.\
"""

CRON_INVALID_TYPE = """\
Argument cronspec needs to be of any of the following types: \
int, str, or an iterable type. {type!r} was given.\
"""

CRON_REPR = """\
<crontab: {0._orig_minute} {0._orig_hour} {0._orig_day_of_week} \
{0._orig_day_of_month} {0._orig_month_of_year} (m/h/d/dM/MY)>\
"""


def cronfield(s):
return '*' if s is None else s


class ParseException(Exception):
"""Raised by crontab_parser when the input can't be parsed."""


class schedule(object):
"""Schedule for periodic task.

:param run_every: Interval in seconds (or a :class:`~datetime.timedelta`).
:param relative: If set to True the run time will be rounded to the
resolution of the interval.
:param nowfun: Function returning the current date and time
(class:`~datetime.datetime`).
:param app: Celery app instance.

"""
relative = False

def __init__(self, run_every=None, relative=False, nowfun=None, app=None):
self.run_every = maybe_timedelta(run_every)
self.relative = relative
self.nowfun = nowfun
self._app = app

def now(self):
return (self.nowfun or self.app.now)()

def remaining_estimate(self, last_run_at):
return remaining(
self.maybe_make_aware(last_run_at), self.run_every,
self.maybe_make_aware(self.now()), self.relative,
)

def is_due(self, last_run_at):
"""Returns tuple of two items `(is_due, next_time_to_check)`,
where next time to check is in seconds.

e.g.

* `(True, 20)`, means the task should be run now, and the next
time to check is in 20 seconds.

* `(False, 12.3)`, means the task is not due, but that the scheduler
should check again in 12.3 seconds.

The next time to check is used to save energy/CPU cycles;
it does not need to be accurate, but it will influence the precision
of your schedule. You must also keep in mind
the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`,
which decides the maximum number of seconds the scheduler can
sleep between re-checking the periodic task intervals. So if you
have a task that changes schedule at runtime then your next_run_at
check will decide how long it will take before a change to the
schedule takes effect. The max loop interval takes precedence
over the next-check-at value returned.

.. admonition:: Scheduler max interval variance

The default max loop interval may vary for different schedulers.
For the default scheduler the value is 5 minutes, but for e.g.
the django-celery database scheduler the value is 5 seconds.

"""
last_run_at = self.maybe_make_aware(last_run_at)
rem_delta = self.remaining_estimate(last_run_at)
remaining_s = timedelta_seconds(rem_delta)
if remaining_s == 0:
return schedstate(is_due=True, next=self.seconds)
return schedstate(is_due=False, next=remaining_s)

def maybe_make_aware(self, dt):
if self.utc_enabled:
return maybe_make_aware(dt, self.tz)
return dt

def __repr__(self):
return '<freq: {0.human_seconds}>'.format(self)

def __eq__(self, other):
if isinstance(other, schedule):
return self.run_every == other.run_every
return self.run_every == other

def __ne__(self, other):
return not self.__eq__(other)

def __reduce__(self):
return self.__class__, (self.run_every, self.relative, self.nowfun)

@property
def seconds(self):
return timedelta_seconds(self.run_every)

@property
def human_seconds(self):
return humanize_seconds(self.seconds)

@property
def app(self):
return self._app or current_app._get_current_object()

@app.setter # noqa
def app(self, app):
self._app = app

@cached_property
def tz(self):
return self.app.timezone

@cached_property
def utc_enabled(self):
return self.app.conf.CELERY_ENABLE_UTC

def to_local(self, dt):
if not self.utc_enabled:
return timezone.to_local_fallback(dt)
return dt
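
# Illustrative usage sketch (not part of the original module); assumes a
# default-configured Celery 3.1.x app so the schedule can resolve timezone
# settings.  When the last run is overdue, is_due() typically reports
# (True, <interval in seconds>).
from datetime import datetime, timedelta
from celery.schedules import schedule

every_minute = schedule(run_every=60)     # seconds or a timedelta
is_due, next_check = every_minute.is_due(datetime.utcnow() - timedelta(minutes=5))
print(is_due, next_check)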


class crontab_parser(object):
"""Parser for crontab expressions. Any expression of the form 'groups'
(see BNF grammar below) is accepted and expanded to a set of numbers.
These numbers represent the units of time that the crontab needs to
run on::

digit :: '0'..'9'
dow :: 'a'..'z'
number :: digit+ | dow+
steps :: number
range :: number ( '-' number ) ?
numspec :: '*' | range
expr :: numspec ( '/' steps ) ?
groups :: expr ( ',' expr ) *

The parser is a general purpose one, useful for parsing hours, minutes and
day_of_week expressions. Example usage::

>>> minutes = crontab_parser(60).parse('*/15')
[0, 15, 30, 45]
>>> hours = crontab_parser(24).parse('*/4')
[0, 4, 8, 12, 16, 20]
>>> day_of_week = crontab_parser(7).parse('*')
[0, 1, 2, 3, 4, 5, 6]

It can also parse day_of_month and month_of_year expressions if initialized
with a minimum of 1. Example usage::

>>> days_of_month = crontab_parser(31, 1).parse('*/3')
[1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
>>> months_of_year = crontab_parser(12, 1).parse('*/2')
[1, 3, 5, 7, 9, 11]
>>> months_of_year = crontab_parser(12, 1).parse('2-12/2')
[2, 4, 6, 8, 10, 12]

The maximum possible expanded value returned is found by the formula::

max_ + min_ - 1

"""
ParseException = ParseException

_range = r'(\w+?)-(\w+)'
_steps = r'/(\w+)?'
_star = r'\*'

def __init__(self, max_=60, min_=0):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile(self._range + self._steps), self._range_steps),
(re.compile(self._range), self._expand_range),
(re.compile(self._star + self._steps), self._star_steps),
(re.compile('^' + self._star + '$'), self._expand_star),
)

def parse(self, spec):
acc = set()
for part in spec.split(','):
if not part:
raise self.ParseException('empty part')
acc |= set(self._parse_part(part))
return acc

def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
return self._expand_range((part, ))

def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr: # Wrap around max_ if necessary
return (list(range(fr, self.min_ + self.max_)) +
list(range(self.min_, to + 1)))
return list(range(fr, to + 1))
return [fr]

def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise self.ParseException('empty filter')
return self._expand_range(toks[:2])[::int(toks[2])]

def _star_steps(self, toks):
if not toks or not toks[0]:
raise self.ParseException('empty filter')
return self._expand_star()[::int(toks[0])]

def _expand_star(self, *args):
return list(range(self.min_, self.max_ + self.min_))

def _expand_number(self, s):
if isinstance(s, string_t) and s[0] == '-':
raise self.ParseException('negative numbers not supported')
try:
i = int(s)
except ValueError:
try:
i = weekday(s)
except KeyError:
raise ValueError('Invalid weekday literal {0!r}.'.format(s))

max_val = self.min_ + self.max_ - 1
if i > max_val:
raise ValueError(
'Invalid end range: {0} > {1}.'.format(i, max_val))
if i < self.min_:
raise ValueError(
'Invalid beginning range: {0} < {1}.'.format(i, self.min_))

return i


class crontab(schedule):
"""A crontab can be used as the `run_every` value of a
:class:`PeriodicTask` to add cron-like scheduling.

Like a :manpage:`cron` job, you can specify units of time of when
you would like the task to execute. It is a reasonably complete
implementation of cron's features, so it should provide a fair
degree of scheduling needs.

You can specify a minute, an hour, a day of the week, a day of the
month, and/or a month in the year in any of the following formats:

.. attribute:: minute

- A (list of) integers from 0-59 that represent the minutes of
an hour of when execution should occur; or
- A string representing a crontab pattern. This may get pretty
advanced, like `minute='*/15'` (for every quarter) or
`minute='1,13,30-45,50-59/2'`.

.. attribute:: hour

- A (list of) integers from 0-23 that represent the hours of
a day of when execution should occur; or
- A string representing a crontab pattern. This may get pretty
advanced, like `hour='*/3'` (for every three hours) or
`hour='0,8-17/2'` (at midnight, and every two hours during
office hours).

.. attribute:: day_of_week

- A (list of) integers from 0-6, where Sunday = 0 and Saturday =
6, that represent the days of a week that execution should
occur.
- A string representing a crontab pattern. This may get pretty
advanced, like `day_of_week='mon-fri'` (for weekdays only).
(Beware that `day_of_week='*/2'` does not literally mean
'every two days', but 'every day that is divisible by two'!)

.. attribute:: day_of_month

- A (list of) integers from 1-31 that represents the days of the
month that execution should occur.
- A string representing a crontab pattern. This may get pretty
advanced, such as `day_of_month='2-30/3'` (for every third day,
starting on the second) or `day_of_month='1-7,15-21'` (for the first and
third weeks of the month).

.. attribute:: month_of_year

- A (list of) integers from 1-12 that represents the months of
the year during which execution can occur.
- A string representing a crontab pattern. This may get pretty
advanced, such as `month_of_year='*/3'` (for the first month
of every quarter) or `month_of_year='2-12/2'` (for every even
numbered month).

.. attribute:: nowfun

Function returning the current date and time
(:class:`~datetime.datetime`).

.. attribute:: app

The Celery app instance.

It is important to realize that any day on which execution should
occur must be represented by entries in all three of the day and
month attributes. For example, if `day_of_week` is 0 and `day_of_month`
is every seventh day, only months that begin on Sunday and are also
in the `month_of_year` attribute will have execution events. Likewise,
`day_of_week` is 1 and `day_of_month` is '1-7,15-21' means every
first and third Monday of every month present in `month_of_year`.

"""

def __init__(self, minute='*', hour='*', day_of_week='*',
day_of_month='*', month_of_year='*', nowfun=None, app=None):
self._orig_minute = cronfield(minute)
self._orig_hour = cronfield(hour)
self._orig_day_of_week = cronfield(day_of_week)
self._orig_day_of_month = cronfield(day_of_month)
self._orig_month_of_year = cronfield(month_of_year)
self.hour = self._expand_cronspec(hour, 24)
self.minute = self._expand_cronspec(minute, 60)
self.day_of_week = self._expand_cronspec(day_of_week, 7)
self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
self.nowfun = nowfun
self._app = app

@staticmethod
def _expand_cronspec(cronspec, max_, min_=0):
"""Takes the given cronspec argument in one of the forms::

int (like 7)
str (like '3-5,*/15', '*', or 'monday')
set (like set([0,15,30,45]))
list (like [8-17])

And converts it to an (expanded) set representing all time unit
values on which the crontab triggers. Parsing occurs only when the
base type is a string. (Parsing is fast and happens only once for
each crontab instance, so there is no significant performance
overhead involved.)

For the other base types, merely Python type conversions happen.

The argument `max_` is needed to determine the expansion of '*'
and ranges.
The argument `min_` is needed to determine the expansion of '*'
and ranges for 1-based cronspecs, such as day of month or month
of year. The default is sufficient for minute, hour, and day of
week.

"""
if isinstance(cronspec, numbers.Integral):
result = set([cronspec])
elif isinstance(cronspec, string_t):
result = crontab_parser(max_, min_).parse(cronspec)
elif isinstance(cronspec, set):
result = cronspec
elif is_iterable(cronspec):
result = set(cronspec)
else:
raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))

# ensure the result does not precede the min or exceed the max
for number in result:
if number >= max_ + min_ or number < min_:
raise ValueError(CRON_PATTERN_INVALID.format(
min=min_, max=max_ - 1 + min_, value=number))
return result

def _delta_to_next(self, last_run_at, next_hour, next_minute):
"""
Takes a datetime of last run, next minute and hour, and
returns a relativedelta for the next scheduled day and time.
Only called when day_of_month and/or month_of_year cronspec
is specified to further limit scheduled task execution.
"""
from bisect import bisect, bisect_left

datedata = AttributeDict(year=last_run_at.year)
days_of_month = sorted(self.day_of_month)
months_of_year = sorted(self.month_of_year)

def day_out_of_range(year, month, day):
try:
datetime(year=year, month=month, day=day)
except ValueError:
return True
return False

def roll_over():
while 1:
flag = (datedata.dom == len(days_of_month) or
day_out_of_range(datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom]) or
(self.maybe_make_aware(datetime(datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom])) < last_run_at))

if flag:
datedata.dom = 0
datedata.moy += 1
if datedata.moy == len(months_of_year):
datedata.moy = 0
datedata.year += 1
else:
break

if last_run_at.month in self.month_of_year:
datedata.dom = bisect(days_of_month, last_run_at.day)
datedata.moy = bisect_left(months_of_year, last_run_at.month)
else:
datedata.dom = 0
datedata.moy = bisect(months_of_year, last_run_at.month)
if datedata.moy == len(months_of_year):
datedata.moy = 0
roll_over()

while 1:
th = datetime(year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom])
if th.isoweekday() % 7 in self.day_of_week:
break
datedata.dom += 1
roll_over()

return ffwd(year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0)

def now(self):
return (self.nowfun or self.app.now)()

def __repr__(self):
return CRON_REPR.format(self)

def __reduce__(self):
return (self.__class__, (self._orig_minute,
self._orig_hour,
self._orig_day_of_week,
self._orig_day_of_month,
self._orig_month_of_year), None)

def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
tz = tz or self.tz
last_run_at = self.maybe_make_aware(last_run_at)
now = self.maybe_make_aware(self.now())
dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7

execute_this_date = (last_run_at.month in self.month_of_year and
last_run_at.day in self.day_of_month and
dow_num in self.day_of_week)

execute_this_hour = (execute_this_date and
last_run_at.day == now.day and
last_run_at.month == now.month and
last_run_at.year == now.year and
last_run_at.hour in self.hour and
last_run_at.minute < max(self.minute))

if execute_this_hour:
next_minute = min(minute for minute in self.minute
if minute > last_run_at.minute)
delta = ffwd(minute=next_minute, second=0, microsecond=0)
else:
next_minute = min(self.minute)
execute_today = (execute_this_date and
last_run_at.hour < max(self.hour))

if execute_today:
next_hour = min(hour for hour in self.hour
if hour > last_run_at.hour)
delta = ffwd(hour=next_hour, minute=next_minute,
second=0, microsecond=0)
else:
next_hour = min(self.hour)
all_dom_moy = (self._orig_day_of_month == '*' and
self._orig_month_of_year == '*')
if all_dom_moy:
next_day = min([day for day in self.day_of_week
if day > dow_num] or self.day_of_week)
add_week = next_day == dow_num

delta = ffwd(weeks=add_week and 1 or 0,
weekday=(next_day - 1) % 7,
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0)
else:
delta = self._delta_to_next(last_run_at,
next_hour, next_minute)
return self.to_local(last_run_at), delta, self.to_local(now)

def remaining_estimate(self, last_run_at, ffwd=ffwd):
"""Returns when the periodic task should run next as a timedelta."""
return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd))

def is_due(self, last_run_at):
"""Returns tuple of two items `(is_due, next_time_to_run)`,
where next time to run is in seconds.

See :meth:`celery.schedules.schedule.is_due` for more information.

"""
rem_delta = self.remaining_estimate(last_run_at)
rem = timedelta_seconds(rem_delta)
due = rem == 0
if due:
rem_delta = self.remaining_estimate(self.now())
rem = timedelta_seconds(rem_delta)
return schedstate(due, rem)

def __eq__(self, other):
if isinstance(other, crontab):
return (other.month_of_year == self.month_of_year and
other.day_of_month == self.day_of_month and
other.day_of_week == self.day_of_week and
other.hour == self.hour and
other.minute == self.minute)
return NotImplemented

def __ne__(self, other):
return not self.__eq__(other)


def maybe_schedule(s, relative=False, app=None):
if s is not None:
if isinstance(s, numbers.Integral):
s = timedelta(seconds=s)
if isinstance(s, timedelta):
return schedule(s, relative, app=app)
else:
s.app = app
return s
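
# Usage sketch (editor's addition, not part of the deleted module): a
# crontab instance can be used directly as a beat schedule entry. The
# task name 'myapp.tasks.cleanup' is a hypothetical placeholder.
from celery.schedules import crontab

CELERYBEAT_SCHEDULE = {
    'nightly-cleanup': {
        'task': 'myapp.tasks.cleanup',   # hypothetical task
        # run at 03:30 on weekdays only
        'schedule': crontab(minute=30, hour=3, day_of_week='mon-fri'),
    },
}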

+ 0 - 71 thesisenv/lib/python3.6/site-packages/celery/security/__init__.py

# -*- coding: utf-8 -*-
"""
celery.security
~~~~~~~~~~~~~~~

Module implementing the signing message serializer.

"""
from __future__ import absolute_import

from kombu.serialization import (
registry, disable_insecure_serializers as _disable_insecure_serializers,
)

from celery.exceptions import ImproperlyConfigured

from .serialization import register_auth

SSL_NOT_INSTALLED = """\
You need to install the pyOpenSSL library to use the auth serializer.
Please install it using:

$ pip install pyOpenSSL
"""

SETTING_MISSING = """\
Sorry, but you have to configure the
* CELERY_SECURITY_KEY
* CELERY_SECURITY_CERTIFICATE, and the
* CELERY_SECURITY_CERT_STORE
configuration settings to use the auth serializer.

Please see the configuration reference for more information.
"""

__all__ = ['setup_security']


def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
digest='sha1', serializer='json', app=None):
"""See :meth:`@Celery.setup_security`."""
if app is None:
from celery import current_app
app = current_app._get_current_object()

_disable_insecure_serializers(allowed_serializers)

conf = app.conf
if conf.CELERY_TASK_SERIALIZER != 'auth':
return

try:
from OpenSSL import crypto # noqa
except ImportError:
raise ImproperlyConfigured(SSL_NOT_INSTALLED)

key = key or conf.CELERY_SECURITY_KEY
cert = cert or conf.CELERY_SECURITY_CERTIFICATE
store = store or conf.CELERY_SECURITY_CERT_STORE

if not (key and cert and store):
raise ImproperlyConfigured(SETTING_MISSING)

with open(key) as kf:
with open(cert) as cf:
register_auth(kf.read(), cf.read(), store, digest, serializer)
registry._set_default_serializer('auth')


def disable_untrusted_serializers(whitelist=None):
_disable_insecure_serializers(allowed=whitelist)
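
# Usage sketch (editor's addition): enabling the signing serializer on an
# application. The key/certificate paths are hypothetical placeholders;
# Celery.setup_security() delegates to setup_security() above.
from celery import Celery

app = Celery('sketch', broker='amqp://')
app.conf.update(
    CELERY_TASK_SERIALIZER='auth',
    CELERY_SECURITY_KEY='/etc/ssl/private/worker.key',
    CELERY_SECURITY_CERTIFICATE='/etc/ssl/certs/worker.pem',
    CELERY_SECURITY_CERT_STORE='/etc/ssl/certs/*.pem',
)
app.setup_security()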

+ 0 - 93 thesisenv/lib/python3.6/site-packages/celery/security/certificate.py

# -*- coding: utf-8 -*-
"""
celery.security.certificate
~~~~~~~~~~~~~~~~~~~~~~~~~~~

X.509 certificates.

"""
from __future__ import absolute_import

import glob
import os

from kombu.utils.encoding import bytes_to_str

from celery.exceptions import SecurityError
from celery.five import values

from .utils import crypto, reraise_errors

__all__ = ['Certificate', 'CertStore', 'FSCertStore']


class Certificate(object):
"""X.509 certificate."""

def __init__(self, cert):
assert crypto is not None
with reraise_errors('Invalid certificate: {0!r}'):
self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

def has_expired(self):
"""Check if the certificate has expired."""
return self._cert.has_expired()

def get_serial_number(self):
"""Return the serial number in the certificate."""
return bytes_to_str(self._cert.get_serial_number())

def get_issuer(self):
"""Return issuer (CA) as a string"""
return ' '.join(bytes_to_str(x[1]) for x in
self._cert.get_issuer().get_components())

def get_id(self):
"""Serial number/issuer pair uniquely identifies a certificate"""
return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())

def verify(self, data, signature, digest):
"""Verifies the signature for string containing data."""
with reraise_errors('Bad signature: {0!r}'):
crypto.verify(self._cert, signature, data, digest)


class CertStore(object):
"""Base class for certificate stores"""

def __init__(self):
self._certs = {}

def itercerts(self):
"""an iterator over the certificates"""
for c in values(self._certs):
yield c

def __getitem__(self, id):
"""get certificate by id"""
try:
return self._certs[bytes_to_str(id)]
except KeyError:
raise SecurityError('Unknown certificate: {0!r}'.format(id))

def add_cert(self, cert):
cert_id = bytes_to_str(cert.get_id())
if cert_id in self._certs:
raise SecurityError('Duplicate certificate: {0!r}'.format(cert_id))
self._certs[cert_id] = cert


class FSCertStore(CertStore):
"""File system certificate store"""

def __init__(self, path):
CertStore.__init__(self)
if os.path.isdir(path):
path = os.path.join(path, '*')
for p in glob.glob(path):
with open(p) as f:
cert = Certificate(f.read())
if cert.has_expired():
raise SecurityError(
'Expired certificate: {0!r}'.format(cert.get_id()))
self.add_cert(cert)
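
# Usage sketch (editor's addition): loading a PEM certificate and looking
# it up again through a file-system store. The paths are hypothetical and
# pyOpenSSL must be installed.
def _certstore_sketch(cert_path='/etc/ssl/certs/worker.pem',
                      cert_dir='/etc/ssl/certs'):
    with open(cert_path) as f:
        cert = Certificate(f.read())
    store = FSCertStore(cert_dir)       # loads every certificate in the dir
    return store[cert.get_id()]         # lookup by "issuer serial" id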

+ 0 - 27 thesisenv/lib/python3.6/site-packages/celery/security/key.py

# -*- coding: utf-8 -*-
"""
celery.security.key
~~~~~~~~~~~~~~~~~~~

Private key for the security serializer.

"""
from __future__ import absolute_import

from kombu.utils.encoding import ensure_bytes

from .utils import crypto, reraise_errors

__all__ = ['PrivateKey']


class PrivateKey(object):

def __init__(self, key):
with reraise_errors('Invalid private key: {0!r}'):
self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)

def sign(self, data, digest):
"""sign string containing data."""
with reraise_errors('Unable to sign data: {0!r}'):
return crypto.sign(self._key, ensure_bytes(data), digest)

+ 0 - 110 thesisenv/lib/python3.6/site-packages/celery/security/serialization.py

# -*- coding: utf-8 -*-
"""
celery.security.serialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Secure serializer.

"""
from __future__ import absolute_import

import base64

from kombu.serialization import registry, dumps, loads
from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes

from .certificate import Certificate, FSCertStore
from .key import PrivateKey
from .utils import reraise_errors

__all__ = ['SecureSerializer', 'register_auth']


def b64encode(s):
return bytes_to_str(base64.b64encode(str_to_bytes(s)))


def b64decode(s):
return base64.b64decode(str_to_bytes(s))


class SecureSerializer(object):

def __init__(self, key=None, cert=None, cert_store=None,
digest='sha1', serializer='json'):
self._key = key
self._cert = cert
self._cert_store = cert_store
self._digest = digest
self._serializer = serializer

def serialize(self, data):
"""serialize data structure into string"""
assert self._key is not None
assert self._cert is not None
with reraise_errors('Unable to serialize: {0!r}', (Exception, )):
content_type, content_encoding, body = dumps(
bytes_to_str(data), serializer=self._serializer)
# What we sign is the serialized body, not the body itself.
# This way the receiver doesn't have to decode the contents
# to verify the signature (thus avoiding potential flaws
# in the decoding step).
body = ensure_bytes(body)
return self._pack(body, content_type, content_encoding,
signature=self._key.sign(body, self._digest),
signer=self._cert.get_id())

def deserialize(self, data):
"""deserialize data structure from string"""
assert self._cert_store is not None
with reraise_errors('Unable to deserialize: {0!r}', (Exception, )):
payload = self._unpack(data)
signature, signer, body = (payload['signature'],
payload['signer'],
payload['body'])
self._cert_store[signer].verify(body, signature, self._digest)
return loads(bytes_to_str(body), payload['content_type'],
payload['content_encoding'], force=True)

def _pack(self, body, content_type, content_encoding, signer, signature,
sep=str_to_bytes('\x00\x01')):
fields = sep.join(
ensure_bytes(s) for s in [signer, signature, content_type,
content_encoding, body]
)
return b64encode(fields)

def _unpack(self, payload, sep=str_to_bytes('\x00\x01')):
raw_payload = b64decode(ensure_bytes(payload))
first_sep = raw_payload.find(sep)

signer = raw_payload[:first_sep]
signer_cert = self._cert_store[signer]

sig_len = signer_cert._cert.get_pubkey().bits() >> 3
signature = raw_payload[
first_sep + len(sep):first_sep + len(sep) + sig_len
]
end_of_sig = first_sep + len(sep) + sig_len + len(sep)

v = raw_payload[end_of_sig:].split(sep)

return {
'signer': signer,
'signature': signature,
'content_type': bytes_to_str(v[0]),
'content_encoding': bytes_to_str(v[1]),
'body': bytes_to_str(v[2]),
}


def register_auth(key=None, cert=None, store=None, digest='sha1',
serializer='json'):
"""register security serializer"""
s = SecureSerializer(key and PrivateKey(key),
cert and Certificate(cert),
store and FSCertStore(store),
digest=digest, serializer=serializer)
registry.register('auth', s.serialize, s.deserialize,
content_type='application/data',
content_encoding='utf-8')
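
# Usage sketch (editor's addition): wiring the secure serializer by hand
# and round-tripping a payload. Key/cert contents and the store path are
# hypothetical; setup_security() in celery.security normally does this
# from the CELERY_SECURITY_* settings.
def _auth_roundtrip_sketch(key_pem, cert_pem, cert_dir):
    s = SecureSerializer(PrivateKey(key_pem), Certificate(cert_pem),
                         FSCertStore(cert_dir),
                         digest='sha1', serializer='json')
    packed = s.serialize({'task': 'add', 'args': [2, 2]})
    return s.deserialize(packed)   # verifies the signature, then decodes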

+ 0 - 35 thesisenv/lib/python3.6/site-packages/celery/security/utils.py

# -*- coding: utf-8 -*-
"""
celery.security.utils
~~~~~~~~~~~~~~~~~~~~~

Utilities used by the message signing serializer.

"""
from __future__ import absolute_import

import sys

from contextlib import contextmanager

from celery.exceptions import SecurityError
from celery.five import reraise

try:
from OpenSSL import crypto
except ImportError: # pragma: no cover
crypto = None # noqa

__all__ = ['reraise_errors']


@contextmanager
def reraise_errors(msg='{0!r}', errors=None):
assert crypto is not None
errors = (crypto.Error, ) if errors is None else errors
try:
yield
except errors as exc:
reraise(SecurityError,
SecurityError(msg.format(exc)),
sys.exc_info()[2])

+ 0 - 76 thesisenv/lib/python3.6/site-packages/celery/signals.py

# -*- coding: utf-8 -*-
"""
celery.signals
~~~~~~~~~~~~~~

This module defines the signals (Observer pattern) sent by
both workers and clients.

Functions can be connected to these signals, and connected
functions are called whenever a signal is sent.

See :ref:`signals` for more information.

"""
from __future__ import absolute_import
from .utils.dispatch import Signal

__all__ = ['before_task_publish', 'after_task_publish',
'task_prerun', 'task_postrun', 'task_success',
'task_retry', 'task_failure', 'task_revoked', 'celeryd_init',
'celeryd_after_setup', 'worker_init', 'worker_process_init',
'worker_ready', 'worker_shutdown', 'setup_logging',
'after_setup_logger', 'after_setup_task_logger',
'beat_init', 'beat_embedded_init', 'eventlet_pool_started',
'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown',
'eventlet_pool_apply']

before_task_publish = Signal(providing_args=[
'body', 'exchange', 'routing_key', 'headers', 'properties',
'declare', 'retry_policy',
])
after_task_publish = Signal(providing_args=[
'body', 'exchange', 'routing_key',
])
#: Deprecated, use after_task_publish instead.
task_sent = Signal(providing_args=[
'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset',
])
task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs'])
task_postrun = Signal(providing_args=[
'task_id', 'task', 'args', 'kwargs', 'retval',
])
task_success = Signal(providing_args=['result'])
task_retry = Signal(providing_args=[
'request', 'reason', 'einfo',
])
task_failure = Signal(providing_args=[
'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo',
])
task_revoked = Signal(providing_args=[
'request', 'terminated', 'signum', 'expired',
])
celeryd_init = Signal(providing_args=['instance', 'conf', 'options'])
celeryd_after_setup = Signal(providing_args=['instance', 'conf'])
import_modules = Signal(providing_args=[])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_process_shutdown = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])
setup_logging = Signal(providing_args=[
'loglevel', 'logfile', 'format', 'colorize',
])
after_setup_logger = Signal(providing_args=[
'logger', 'loglevel', 'logfile', 'format', 'colorize',
])
after_setup_task_logger = Signal(providing_args=[
'logger', 'loglevel', 'logfile', 'format', 'colorize',
])
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs'])
user_preload_options = Signal(providing_args=['app', 'options'])
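
# Usage sketch (editor's addition): receivers connect to a signal and are
# called with keyword arguments matching providing_args. The handler name
# below is a hypothetical example.
from celery.signals import task_success

@task_success.connect
def _log_success(sender=None, result=None, **kwargs):
    # sender is the task that succeeded, result its return value
    print('task {0!r} finished with {1!r}'.format(sender, result))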

+ 0 - 153 thesisenv/lib/python3.6/site-packages/celery/states.py

# -*- coding: utf-8 -*-
"""
celery.states
=============

Built-in task states.

.. _states:

States
------

See :ref:`task-states`.

.. _statesets:

Sets
----

.. state:: READY_STATES

READY_STATES
~~~~~~~~~~~~

Set of states meaning the task result is ready (has been executed).

.. state:: UNREADY_STATES

UNREADY_STATES
~~~~~~~~~~~~~~

Set of states meaning the task result is not ready (has not been executed).

.. state:: EXCEPTION_STATES

EXCEPTION_STATES
~~~~~~~~~~~~~~~~

Set of states meaning the task returned an exception.

.. state:: PROPAGATE_STATES

PROPAGATE_STATES
~~~~~~~~~~~~~~~~

Set of exception states that should propagate exceptions to the user.

.. state:: ALL_STATES

ALL_STATES
~~~~~~~~~~

Set of all possible states.


Misc.
-----

"""
from __future__ import absolute_import

__all__ = ['PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE',
'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES',
'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state']

#: State precedence.
#: None represents the precedence of an unknown state.
#: Lower index means higher precedence.
PRECEDENCE = ['SUCCESS',
'FAILURE',
None,
'REVOKED',
'STARTED',
'RECEIVED',
'RETRY',
'PENDING']

#: Hash lookup of PRECEDENCE to index
PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE))))
NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None]


def precedence(state):
"""Get the precedence index for state.

Lower index means higher precedence.

"""
try:
return PRECEDENCE_LOOKUP[state]
except KeyError:
return NONE_PRECEDENCE


class state(str):
"""State is a subclass of :class:`str`, implementing comparison
methods adhering to state precedence rules::

>>> from celery.states import state, PENDING, SUCCESS

>>> state(PENDING) < state(SUCCESS)
True

Any custom state is considered to be lower than :state:`FAILURE` and
:state:`SUCCESS`, but higher than any of the other built-in states::

>>> state('PROGRESS') > state(STARTED)
True

>>> state('PROGRESS') > state('SUCCESS')
False

"""

def compare(self, other, fun):
return fun(precedence(self), precedence(other))

def __gt__(self, other):
return precedence(self) < precedence(other)

def __ge__(self, other):
return precedence(self) <= precedence(other)

def __lt__(self, other):
return precedence(self) > precedence(other)

def __le__(self, other):
return precedence(self) >= precedence(other)

#: Task state is unknown (assumed pending since you know the id).
PENDING = 'PENDING'
#: Task was received by a worker.
RECEIVED = 'RECEIVED'
#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`).
STARTED = 'STARTED'
#: Task succeeded
SUCCESS = 'SUCCESS'
#: Task failed
FAILURE = 'FAILURE'
#: Task was revoked.
REVOKED = 'REVOKED'
#: Task is waiting for retry.
RETRY = 'RETRY'
IGNORED = 'IGNORED'
REJECTED = 'REJECTED'

READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED])
UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY])
EXCEPTION_STATES = frozenset([RETRY, FAILURE, REVOKED])
PROPAGATE_STATES = frozenset([FAILURE, REVOKED])

ALL_STATES = frozenset([PENDING, RECEIVED, STARTED,
SUCCESS, FAILURE, RETRY, REVOKED])
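
# Usage sketch (editor's addition): precedence()/state() make it easy to
# keep the "most significant" state seen so far, e.g. when merging events
# from several workers (a lower precedence index wins).
def _most_advanced(current, incoming):
    return incoming if state(incoming) > state(current) else current

assert _most_advanced(PENDING, STARTED) == STARTED
assert _most_advanced(SUCCESS, RETRY) == SUCCESS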

+ 0 - 59 thesisenv/lib/python3.6/site-packages/celery/task/__init__.py

# -*- coding: utf-8 -*-
"""
celery.task
~~~~~~~~~~~

This is the old task module. It should not be used anymore;
import from the main 'celery' module instead.
If you're looking for the decorator implementation then that's in
``celery.app.base.Celery.task``.

"""
from __future__ import absolute_import

from celery._state import current_app, current_task as current
from celery.five import LazyModule, recreate_module
from celery.local import Proxy

__all__ = [
'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task',
'group', 'chord', 'subtask', 'TaskSet',
]


STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
from celery.canvas import group, chord, subtask
from .base import BaseTask, Task, PeriodicTask, task, periodic_task
from .sets import TaskSet


class module(LazyModule):

def __call__(self, *args, **kwargs):
return self.task(*args, **kwargs)


old_module, new_module = recreate_module( # pragma: no cover
__name__,
by_module={
'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask',
'task', 'periodic_task'],
'celery.canvas': ['group', 'chord', 'subtask'],
'celery.task.sets': ['TaskSet'],
},
base=module,
__package__='celery.task',
__file__=__file__,
__path__=__path__,
__doc__=__doc__,
current=current,
discard_all=Proxy(lambda: current_app.control.purge),
backend_cleanup=Proxy(
lambda: current_app.tasks['celery.backend_cleanup']
),
)

+ 0 - 179 thesisenv/lib/python3.6/site-packages/celery/task/base.py

# -*- coding: utf-8 -*-
"""
celery.task.base
~~~~~~~~~~~~~~~~

The task implementation has been moved to :mod:`celery.app.task`.

This contains the backward compatible Task class used in the old API,
and shouldn't be used in new applications.

"""
from __future__ import absolute_import

from kombu import Exchange

from celery import current_app
from celery.app.task import Context, TaskType, Task as BaseTask # noqa
from celery.five import class_property, reclassmethod
from celery.schedules import maybe_schedule
from celery.utils.log import get_task_logger

__all__ = ['Task', 'PeriodicTask', 'task']

#: list of methods that must be classmethods in the old API.
_COMPAT_CLASSMETHODS = (
'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request',
'AsyncResult', 'subtask', '_get_request', '_get_exec_options',
)


class Task(BaseTask):
"""Deprecated Task base class.

Modern applications should use :class:`celery.Task` instead.

"""
abstract = True
__bound__ = False
__v2_compat__ = True

# - Deprecated compat. attributes -:

queue = None
routing_key = None
exchange = None
exchange_type = None
delivery_mode = None
mandatory = False # XXX deprecated
immediate = False # XXX deprecated
priority = None
type = 'regular'
disable_error_emails = False
accept_magic_kwargs = False

from_config = BaseTask.from_config + (
('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'),
)

# In old Celery the @task decorator didn't exist, so one would create
# classes instead and use them directly (e.g. MyTask.apply_async()).
# the use of classmethods was a hack so that it was not necessary
# to instantiate the class before using it, but it has only
# given us pain (like all magic).
for name in _COMPAT_CLASSMETHODS:
locals()[name] = reclassmethod(getattr(BaseTask, name))

@class_property
def request(cls):
return cls._get_request()

@class_property
def backend(cls):
if cls._backend is None:
return cls.app.backend
return cls._backend

@backend.setter
def backend(cls, value): # noqa
cls._backend = value

@classmethod
def get_logger(self, **kwargs):
return get_task_logger(self.name)

@classmethod
def establish_connection(self):
"""Deprecated method used to get a broker connection.

Should be replaced with :meth:`@Celery.connection`
instead, or by acquiring connections from the connection pool:

.. code-block:: python

# using the connection pool
with celery.pool.acquire(block=True) as conn:
...

# establish fresh connection
with celery.connection() as conn:
...
"""
return self._get_app().connection()

def get_publisher(self, connection=None, exchange=None,
exchange_type=None, **options):
"""Deprecated method to get the task publisher (now called producer).

Should be replaced with :class:`@amqp.TaskProducer`:

.. code-block:: python

with celery.connection() as conn:
with celery.amqp.TaskProducer(conn) as prod:
my_task.apply_async(producer=prod)

"""
exchange = self.exchange if exchange is None else exchange
if exchange_type is None:
exchange_type = self.exchange_type
connection = connection or self.establish_connection()
return self._get_app().amqp.TaskProducer(
connection,
exchange=exchange and Exchange(exchange, exchange_type),
routing_key=self.routing_key, **options
)

@classmethod
def get_consumer(self, connection=None, queues=None, **kwargs):
"""Deprecated method used to get consumer for the queue
this task is sent to.

Should be replaced with :class:`@amqp.TaskConsumer` instead:

"""
Q = self._get_app().amqp
connection = connection or self.establish_connection()
if queues is None:
queues = Q.queues[self.queue] if self.queue else Q.default_queue
return Q.TaskConsumer(connection, queues, **kwargs)


class PeriodicTask(Task):
"""A periodic task is a task that adds itself to the
:setting:`CELERYBEAT_SCHEDULE` setting."""
abstract = True
ignore_result = True
relative = False
options = None
compat = True

def __init__(self):
if not hasattr(self, 'run_every'):
raise NotImplementedError(
'Periodic tasks must have a run_every attribute')
self.run_every = maybe_schedule(self.run_every, self.relative)
super(PeriodicTask, self).__init__()

@classmethod
def on_bound(cls, app):
app.conf.CELERYBEAT_SCHEDULE[cls.name] = {
'task': cls.name,
'schedule': cls.run_every,
'args': (),
'kwargs': {},
'options': cls.options or {},
'relative': cls.relative,
}


def task(*args, **kwargs):
"""Deprecated decorator, please use :func:`celery.task`."""
return current_app.task(*args, **dict({'accept_magic_kwargs': False,
'base': Task}, **kwargs))


def periodic_task(*args, **options):
"""Deprecated decorator, please use :setting:`CELERYBEAT_SCHEDULE`."""
return task(**dict({'base': PeriodicTask}, **options))
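
# Usage sketch (editor's addition): the deprecated class-based API that
# this module keeps alive. New code should use an app.task together with
# the CELERYBEAT_SCHEDULE setting instead.
from celery.schedules import crontab


class EveryMondayMorning(PeriodicTask):
    """Hypothetical example task; registers itself in CELERYBEAT_SCHEDULE."""
    run_every = crontab(minute=30, hour=7, day_of_week=1)

    def run(self):
        return 'good morning'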

+ 0 - 220 thesisenv/lib/python3.6/site-packages/celery/task/http.py

# -*- coding: utf-8 -*-
"""
celery.task.http
~~~~~~~~~~~~~~~~

Webhook task implementation.

"""
from __future__ import absolute_import

import anyjson
import sys

try:
from urllib.parse import parse_qsl, urlencode, urlparse # Py3
except ImportError: # pragma: no cover
from urllib import urlencode # noqa
from urlparse import urlparse, parse_qsl # noqa

from celery import shared_task, __version__ as celery_version
from celery.five import items, reraise
from celery.utils.log import get_task_logger

__all__ = ['InvalidResponseError', 'RemoteExecuteError', 'UnknownStatusError',
'HttpDispatch', 'dispatch', 'URL']

GET_METHODS = frozenset(['GET', 'HEAD'])
logger = get_task_logger(__name__)


if sys.version_info[0] == 3: # pragma: no cover

from urllib.request import Request, urlopen

def utf8dict(tup):
if not isinstance(tup, dict):
return dict(tup)
return tup

else:

from urllib2 import Request, urlopen # noqa

def utf8dict(tup): # noqa
"""With a dict's items() tuple return a new dict with any utf-8
keys/values encoded."""
return dict(
(k.encode('utf-8'),
v.encode('utf-8') if isinstance(v, unicode) else v) # noqa
for k, v in tup)


class InvalidResponseError(Exception):
"""The remote server gave an invalid response."""


class RemoteExecuteError(Exception):
"""The remote task gave a custom error."""


class UnknownStatusError(InvalidResponseError):
"""The remote server gave an unknown status."""


def extract_response(raw_response, loads=anyjson.loads):
"""Extract the response text from a raw JSON response."""
if not raw_response:
raise InvalidResponseError('Empty response')
try:
payload = loads(raw_response)
except ValueError as exc:
reraise(InvalidResponseError, InvalidResponseError(
str(exc)), sys.exc_info()[2])

status = payload['status']
if status == 'success':
return payload['retval']
elif status == 'failure':
raise RemoteExecuteError(payload.get('reason'))
else:
raise UnknownStatusError(str(status))


class MutableURL(object):
"""Object wrapping a Uniform Resource Locator.

Supports editing the query parameter list.
You can convert the object back to a string; the query will be
properly urlencoded.

Examples

>>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo')
>>> url.query
{'x': '3', 'y': '4'}
>>> str(url)
'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
>>> url.query['x'] = 10
>>> url.query.update({'George': 'Costanza'})
>>> str(url)
'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo'

"""
def __init__(self, url):
self.parts = urlparse(url)
self.query = dict(parse_qsl(self.parts[4]))

def __str__(self):
scheme, netloc, path, params, query, fragment = self.parts
query = urlencode(utf8dict(items(self.query)))
components = [scheme + '://', netloc, path or '/',
';{0}'.format(params) if params else '',
'?{0}'.format(query) if query else '',
'#{0}'.format(fragment) if fragment else '']
return ''.join(c for c in components if c)

def __repr__(self):
return '<{0}: {1}>'.format(type(self).__name__, self)


class HttpDispatch(object):
"""Make task HTTP request and collect the task result.

:param url: The URL to request.
:param method: HTTP method used. Currently supported methods are `GET`
and `POST`.
:param task_kwargs: Task keyword arguments.
:param logger: Logger used for user/system feedback.

"""
user_agent = 'celery/{version}'.format(version=celery_version)
timeout = 5

def __init__(self, url, method, task_kwargs, **kwargs):
self.url = url
self.method = method
self.task_kwargs = task_kwargs
self.logger = kwargs.get('logger') or logger

def make_request(self, url, method, params):
"""Perform HTTP request and return the response."""
request = Request(url, params)
for key, val in items(self.http_headers):
request.add_header(key, val)
response = urlopen(request) # user catches errors.
return response.read()

def dispatch(self):
"""Dispatch callback and return result."""
url = MutableURL(self.url)
params = None
if self.method in GET_METHODS:
url.query.update(self.task_kwargs)
else:
params = urlencode(utf8dict(items(self.task_kwargs)))
raw_response = self.make_request(str(url), self.method, params)
return extract_response(raw_response)

@property
def http_headers(self):
headers = {'User-Agent': self.user_agent}
return headers


@shared_task(name='celery.http_dispatch', bind=True,
url=None, method=None, accept_magic_kwargs=False)
def dispatch(self, url=None, method='GET', **kwargs):
"""Task dispatching to an URL.

:keyword url: The URL location of the HTTP callback task.
:keyword method: Method to use when dispatching the callback. Usually
`GET` or `POST`.
:keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.

.. attribute:: url

If this is set, this is used as the default URL for requests.
Default is to require the user of the task to supply the url as an
argument, as this attribute is intended for subclasses.

.. attribute:: method

If this is set, this is the default method used for requests.
Default is to require the user of the task to supply the method as an
argument, as this attribute is intended for subclasses.

"""
return HttpDispatch(
url or self.url, method or self.method, kwargs,
).dispatch()


class URL(MutableURL):
"""HTTP Callback URL

Supports requesting a URL asynchronously.

:param url: URL to request.
:keyword dispatcher: Class used to dispatch the request.
By default this is :func:`dispatch`.

"""
dispatcher = None

def __init__(self, url, dispatcher=None, app=None):
super(URL, self).__init__(url)
self.app = app
self.dispatcher = dispatcher or self.dispatcher
if self.dispatcher is None:
# Get default dispatcher
self.dispatcher = (
self.app.tasks['celery.http_dispatch'] if self.app
else dispatch
)

def get_async(self, **kwargs):
return self.dispatcher.delay(str(self), 'GET', **kwargs)

def post_async(self, **kwargs):
return self.dispatcher.delay(str(self), 'POST', **kwargs)
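
# Usage sketch (editor's addition): dispatching a webhook task to a
# hypothetical endpoint. The endpoint must answer with the JSON protocol
# expected by extract_response(), e.g. {"status": "success", "retval": 20}.
def _webhook_sketch():
    res = URL('http://example.com/multiply').get_async(x=10, y=10)
    return res.get()   # blocks until a worker has executed the callback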

+ 0 - 88 thesisenv/lib/python3.6/site-packages/celery/task/sets.py

# -*- coding: utf-8 -*-
"""
celery.task.sets
~~~~~~~~~~~~~~~~

Old ``group`` implementation. This module should
not be used anymore; use :func:`celery.group` instead.

"""
from __future__ import absolute_import

from celery._state import get_current_worker_task
from celery.app import app_or_default
from celery.canvas import maybe_signature # noqa
from celery.utils import uuid, warn_deprecated

from celery.canvas import subtask # noqa

warn_deprecated(
'celery.task.sets and TaskSet', removal='4.0',
alternative="""\
Please use "group" instead (see the Canvas section in the userguide)\
""")


class TaskSet(list):
"""A task containing several subtasks, making it possible
to track how many, or when all of the tasks have been completed.

:param tasks: A list of :class:`subtask` instances.

Example::

>>> from myproj.tasks import refresh_feed

>>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss')
>>> s = TaskSet(refresh_feed.s(url) for url in urls)
>>> taskset_result = s.apply_async()
>>> list_of_return_values = taskset_result.join() # *expensive*

"""
app = None

def __init__(self, tasks=None, app=None, Publisher=None):
self.app = app_or_default(app or self.app)
super(TaskSet, self).__init__(
maybe_signature(t, app=self.app) for t in tasks or []
)
self.Publisher = Publisher or self.app.amqp.TaskProducer
self.total = len(self) # XXX compat

def apply_async(self, connection=None, publisher=None, taskset_id=None):
"""Apply TaskSet."""
app = self.app

if app.conf.CELERY_ALWAYS_EAGER:
return self.apply(taskset_id=taskset_id)

with app.connection_or_acquire(connection) as conn:
setid = taskset_id or uuid()
pub = publisher or self.Publisher(conn)
results = self._async_results(setid, pub)

result = app.TaskSetResult(setid, results)
parent = get_current_worker_task()
if parent:
parent.add_trail(result)
return result

def _async_results(self, taskset_id, publisher):
return [task.apply_async(taskset_id=taskset_id, publisher=publisher)
for task in self]

def apply(self, taskset_id=None):
"""Applies the TaskSet locally by blocking until all tasks return."""
setid = taskset_id or uuid()
return self.app.TaskSetResult(setid, self._sync_results(setid))

def _sync_results(self, taskset_id):
return [task.apply(taskset_id=taskset_id) for task in self]

@property
def tasks(self):
return self

@tasks.setter # noqa
def tasks(self, tasks):
self[:] = tasks

+ 0 - 12 thesisenv/lib/python3.6/site-packages/celery/task/trace.py

"""This module has moved to celery.app.trace."""
from __future__ import absolute_import

import sys

from celery.app import trace
from celery.utils import warn_deprecated

warn_deprecated('celery.task.trace', removal='3.2',
alternative='Please use celery.app.trace instead.')

sys.modules[__name__] = trace

+ 0 - 87 thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py

from __future__ import absolute_import

import logging
import os
import sys
import warnings

from importlib import import_module

try:
WindowsError = WindowsError # noqa
except NameError:

class WindowsError(Exception):
pass


def setup():
os.environ.update(
# warn if config module not found
C_WNOCONF='yes',
KOMBU_DISABLE_LIMIT_PROTECTION='yes',
)

if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv:
from warnings import catch_warnings
with catch_warnings(record=True):
import_all_modules()
warnings.resetwarnings()
from celery.tests.case import Trap
from celery._state import set_default_app
set_default_app(Trap())


def teardown():
# Don't want SUBDEBUG log messages at finalization.
try:
from multiprocessing.util import get_logger
except ImportError:
pass
else:
get_logger().setLevel(logging.WARNING)

# Make sure test database is removed.
import os
if os.path.exists('test.db'):
try:
os.remove('test.db')
except WindowsError:
pass

# Make sure there are no remaining threads at shutdown.
import threading
remaining_threads = [thread for thread in threading.enumerate()
if thread.getName() != 'MainThread']
if remaining_threads:
sys.stderr.write(
'\n\n**WARNING**: Remaining threads at teardown: %r...\n' % (
remaining_threads))


def find_distribution_modules(name=__name__, file=__file__):
current_dist_depth = len(name.split('.')) - 1
current_dist = os.path.join(os.path.dirname(file),
*([os.pardir] * current_dist_depth))
abs = os.path.abspath(current_dist)
dist_name = os.path.basename(abs)

for dirpath, dirnames, filenames in os.walk(abs):
package = (dist_name + dirpath[len(abs):]).replace('/', '.')
if '__init__.py' in filenames:
yield package
for filename in filenames:
if filename.endswith('.py') and filename != '__init__.py':
yield '.'.join([package, filename])[:-3]


def import_all_modules(name=__name__, file=__file__,
skip=('celery.decorators',
'celery.contrib.batches',
'celery.task')):
for module in find_distribution_modules(name, file):
if not module.startswith(skip):
try:
import_module(module)
except ImportError:
pass

+ 0 - 0 thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py


Some files were not shown because too many files changed in this diff
