
added celery and tasks, but most importantly added tag_list

newsletter
Esther Kleinhenz 5 years ago
parent commit f5980566ae
100 changed files with 20248 additions and 23 deletions
  1. +5 -0  application/__init__.py
  2. +42 -0  application/admin.py
  3. +13 -0  application/celeryapp.py
  4. +57 -0  application/email_service.py
  5. +32 -0  application/forms.py
  6. +51 -0  application/migrations/0006_auto_20181021_1347.py
  7. +39 -0  application/models.py
  8. +7 -0  application/tasks.py
  9. +17 -16  application/templates/tag_list.html
  10. +7 -5  application/views.py
  11. +1 -0  croniter
  12. +177 -0  log.txt
  13. +14 -2  mysite/settings.py
  14. +11 -0  thesisenv/bin/celery
  15. +11 -0  thesisenv/bin/celerybeat
  16. +11 -0  thesisenv/bin/celeryd
  17. +11 -0  thesisenv/bin/celeryd-multi
  18. +106 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst
  19. +0 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/INSTALLER
  20. +130 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA
  21. +37 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD
  22. +6 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL
  23. +1 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json
  24. +1 -0  thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt
  25. +70 -0  thesisenv/lib/python3.6/site-packages/amqp/__init__.py
  26. +93 -0  thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py
  27. +124 -0  thesisenv/lib/python3.6/site-packages/amqp/basic_message.py
  28. +2550 -0  thesisenv/lib/python3.6/site-packages/amqp/channel.py
  29. +1008 -0  thesisenv/lib/python3.6/site-packages/amqp/connection.py
  30. +262 -0  thesisenv/lib/python3.6/site-packages/amqp/exceptions.py
  31. +191 -0  thesisenv/lib/python3.6/site-packages/amqp/five.py
  32. +231 -0  thesisenv/lib/python3.6/site-packages/amqp/method_framing.py
  33. +13 -0  thesisenv/lib/python3.6/site-packages/amqp/protocol.py
  34. +509 -0  thesisenv/lib/python3.6/site-packages/amqp/serialization.py
  35. +0 -0  thesisenv/lib/python3.6/site-packages/amqp/tests/__init__.py
  36. +85 -0  thesisenv/lib/python3.6/site-packages/amqp/tests/case.py
  37. +35 -0  thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py
  38. +299 -0  thesisenv/lib/python3.6/site-packages/amqp/transport.py
  39. +102 -0  thesisenv/lib/python3.6/site-packages/amqp/utils.py
  40. +85 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO
  41. +15 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt
  42. +1 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt
  43. +7 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt
  44. +1 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe
  45. +1 -0  thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt
  46. +142 -0  thesisenv/lib/python3.6/site-packages/anyjson/__init__.py
  47. +792 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO
  48. +71 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt
  49. +1 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt
  50. +67 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt
  51. +1 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe
  52. +2 -0  thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt
  53. +323 -0  thesisenv/lib/python3.6/site-packages/billiard/__init__.py
  54. +40 -0  thesisenv/lib/python3.6/site-packages/billiard/_ext.py
  55. +116 -0  thesisenv/lib/python3.6/site-packages/billiard/_win.py
  56. +134 -0  thesisenv/lib/python3.6/site-packages/billiard/common.py
  57. +107 -0  thesisenv/lib/python3.6/site-packages/billiard/compat.py
  58. +27 -0  thesisenv/lib/python3.6/site-packages/billiard/connection.py
  59. +165 -0  thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py
  60. +93 -0  thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py
  61. +134 -0  thesisenv/lib/python3.6/site-packages/billiard/einfo.py
  62. +54 -0  thesisenv/lib/python3.6/site-packages/billiard/exceptions.py
  63. +192 -0  thesisenv/lib/python3.6/site-packages/billiard/five.py
  64. +580 -0  thesisenv/lib/python3.6/site-packages/billiard/forking.py
  65. +255 -0  thesisenv/lib/python3.6/site-packages/billiard/heap.py
  66. +1169 -0  thesisenv/lib/python3.6/site-packages/billiard/managers.py
  67. +1959 -0  thesisenv/lib/python3.6/site-packages/billiard/pool.py
  68. +368 -0  thesisenv/lib/python3.6/site-packages/billiard/process.py
  69. +0 -0  thesisenv/lib/python3.6/site-packages/billiard/py3/__init__.py
  70. +965 -0  thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py
  71. +249 -0  thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py
  72. +372 -0  thesisenv/lib/python3.6/site-packages/billiard/queues.py
  73. +10 -0  thesisenv/lib/python3.6/site-packages/billiard/reduction.py
  74. +248 -0  thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py
  75. +449 -0  thesisenv/lib/python3.6/site-packages/billiard/synchronize.py
  76. +21 -0  thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py
  77. +85 -0  thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py
  78. +108 -0  thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py
  79. +12 -0  thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py
  80. +145 -0  thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py
  81. +152 -0  thesisenv/lib/python3.6/site-packages/billiard/util.py
  82. +428 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst
  83. +0 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER
  84. +500 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA
  85. +496 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD
  86. +6 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL
  87. +6 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt
  88. +1 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json
  89. +1 -0  thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt
  90. +155 -0  thesisenv/lib/python3.6/site-packages/celery/__init__.py
  91. +54 -0  thesisenv/lib/python3.6/site-packages/celery/__main__.py
  92. +159 -0  thesisenv/lib/python3.6/site-packages/celery/_state.py
  93. +150 -0  thesisenv/lib/python3.6/site-packages/celery/app/__init__.py
  94. +512 -0  thesisenv/lib/python3.6/site-packages/celery/app/amqp.py
  95. +58 -0  thesisenv/lib/python3.6/site-packages/celery/app/annotations.py
  96. +675 -0  thesisenv/lib/python3.6/site-packages/celery/app/base.py
  97. +379 -0  thesisenv/lib/python3.6/site-packages/celery/app/builtins.py
  98. +317 -0  thesisenv/lib/python3.6/site-packages/celery/app/control.py
  99. +274 -0  thesisenv/lib/python3.6/site-packages/celery/app/defaults.py
  100. +0 -0  thesisenv/lib/python3.6/site-packages/celery/app/log.py

+ 5
- 0
application/__init__.py

@@ -0,0 +1,5 @@
from __future__ import absolute_import, unicode_literals
# This will make sure celery is always imported when
# Django starts so that shared_task will use this app.
from .celeryapp import app as celery_app
__all__ = ['celery_app']

+ 42
- 0
application/admin.py

@@ -4,6 +4,8 @@ from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User

from .models import Post, CustomUser
from .models import ScheduledReport, ReportRecipient, ScheduledReportGroup
from .forms import ScheduledReportForm


class CustomUserInline(admin.StackedInline):
@@ -20,3 +22,43 @@ admin.site.unregister(User)
admin.site.register(User, UserAdmin)

admin.site.register(Post)


class ReportRecipientAdmin(admin.TabularInline):
    model = ReportRecipient


class ScheduledReportAdmin(admin.ModelAdmin):
    """
    List display for Scheduled reports in Django admin
    """
    model = ScheduledReport
    list_display = ('id', 'get_recipients')
    inlines = [
        ReportRecipientAdmin
    ]
    form = ScheduledReportForm

    def get_recipients(self, model):
        recipients = model.reportrecep.all().values_list('email', flat=True)
        if not recipients:
            return 'No recipients added'
        recipient_list = ''
        for recipient in recipients:
            recipient_list = recipient_list + recipient + ', '
        return recipient_list[:-2]
    get_recipients.short_description = 'Recipients'
    get_recipients.allow_tags = True


class ScheduledReportGroupAdmin(admin.ModelAdmin):
    """
    List display for ScheduledReportGroup Admin
    """
    model = ScheduledReportGroup
    list_display = ('get_scheduled_report_name', 'get_report_name')

    def get_scheduled_report_name(self, model):
        return model.scheduled_report.subject

    def get_report_name(self, model):
        return model.report.name
    get_scheduled_report_name.short_description = "Scheduled Report Name"
    get_report_name.short_description = "Report Name"
    show_change_link = True
    get_report_name.allow_tags = True


admin.site.register(ScheduledReport, ScheduledReportAdmin)
admin.site.register(ScheduledReportGroup, ScheduledReportGroupAdmin)
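
A sketch of an equivalent, more concise get_recipients() using str.join (same output, assuming the reportrecep reverse relation defined in models.py):

    def get_recipients(self, model):
        # values_list(..., flat=True) yields the email strings directly;
        # an empty queryset falls through to the placeholder text
        recipients = model.reportrecep.all().values_list('email', flat=True)
        return ', '.join(recipients) or 'No recipients added'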

+ 13
- 0
application/celeryapp.py

@@ -0,0 +1,13 @@
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
from django.conf import settings

app = Celery('application')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
app.config_from_object('django.conf:settings')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
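
With this module in place, autodiscover_tasks() picks up tasks from every installed app's tasks.py. A minimal sketch of what that enables (the add task and the call below are illustrative examples, not part of this commit):

    # hypothetical example in application/tasks.py
    from celery import shared_task

    @shared_task
    def add(x, y):
        return x + y

    # from anywhere in Django code, once a worker is running:
    # result = add.delay(2, 3)   # returns an AsyncResult; result.get() == 5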

+ 57
- 0
application/email_service.py

@@ -0,0 +1,57 @@
from datetime import datetime, timedelta
from django.core.mail import send_mail
from django.template import Template, Context
from django.http import HttpResponse
from django.conf import settings
from .models import ScheduledReport, ScheduledReportGroup, ReportRecipient
class ScheduledReportConfig(object):
    def __init__(self, scheduled_report):
        """
        Expects a scheduled report object and initializes
        its own scheduled_report attribute with it
        """
        self.scheduled_report = scheduled_report

    def get_report_config(self):
        """
        Returns the configuration related to a scheduled report, needed
        to populate the email
        """
        return {
            "template_context": self._get_related_reports_data(),
            "recipients": self._get_report_recipients()
        }

    def _get_related_reports_data(self):
        """
        Returns the list of reports data which needs to be sent out in a scheduled report
        """
        pass

    def _get_report_recipients(self):
        """
        Returns the recipient list for a scheduled report
        """
        pass


def create_email_data(content=None):
    content = '''
    ''' + str(content) + ''''''
    return content


def send_emails():
    current_time = datetime.utcnow()
    scheduled_reports = ScheduledReport.objects.filter(next_run_at__lt=current_time)
    for scheduled_report in scheduled_reports:
        report_config = ScheduledReportConfig(scheduled_report).get_report_config()
        # Specify the template path you want to send out in the email.
        template = Template(create_email_data('path/to/your/email_template.html'))
        # Create the email html using Django's context processor
        report_template = template.render(Context(report_config['template_context']))
        scheduled_report.save()
        if not scheduled_report.subject:
            """ Handle exception for subject not provided """
        if not report_config['recipients']:
            """ Handle exception for recipients not provided """
        send_mail(
            scheduled_report.subject, 'Here is the message.',
            settings.EMAIL_HOST_USER, report_config['recipients'],
            fail_silently=False, html_message=report_template
        )
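
Note that create_email_data() wraps the string it is given, so the Template above renders the literal path rather than the file's contents. A minimal sketch of rendering the template file itself, assuming _get_related_reports_data() is filled in to return a dict (render_to_string is Django's standard loader API; the helper name is hypothetical):

    from django.template.loader import render_to_string

    def build_report_html(scheduled_report):
        # loads the template from a configured TEMPLATES directory and
        # renders it with the scheduled report's context
        config = ScheduledReportConfig(scheduled_report).get_report_config()
        return render_to_string('path/to/your/email_template.html',
                                config['template_context'])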

+ 32
- 0
application/forms.py

@@ -1,8 +1,16 @@
from django import forms
from datetime import datetime

from .models import Post, CustomUser, ScheduledReport
from django.forms import ModelForm, ValidationError
from taggit.forms import *
from django.contrib.auth.forms import UserCreationForm, UserChangeForm

from croniter import croniter

class PostForm(forms.ModelForm):
    class Meta:
        model = Post
@@ -13,3 +21,27 @@ class NewTagForm(forms.ModelForm):
    class Meta:
        model = CustomUser
        fields = ['m_tags']


class ScheduledReportForm(ModelForm):
    class Meta:
        model = ScheduledReport
        fields = ['subject', 'cron_expression']
        help_texts = {'cron_expression': 'Scheduled time is considered in UTC'}

    def clean(self):
        cleaned_data = super(ScheduledReportForm, self).clean()
        cron_expression = cleaned_data.get("cron_expression")
        try:
            croniter(cron_expression, datetime.now())
        except Exception:
            raise ValidationError(
                "Incorrect cron expression. The fields are (in order of appearance): "
                "a number (or list of numbers, or range of numbers), m, for the minute of the hour; "
                "a number (or list, or range), h, for the hour of the day; "
                "a number (or list, or range), dom, for the day of the month; "
                "a number (or list, or range), or name (or list of names), mon, for the month of the year; "
                "a number (or list, or range), or name (or list of names), dow, for the day of the week. "
                "An asterisk (*) tells cron to run the job for every value of that unit. "
                "E.g. '*/5 * * * *' executes every 5 minutes.")
        return cleaned_data
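
For reference, the vendored croniter subproject is what drives both this validation and the model's next_run_at computation; a short sketch of its API:

    from datetime import datetime
    from croniter import croniter

    it = croniter('*/5 * * * *', datetime(2018, 10, 21, 13, 47))
    print(it.get_next(datetime))   # 2018-10-21 13:50:00
    print(it.get_next(datetime))   # 2018-10-21 13:55:00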

+ 51
- 0
application/migrations/0006_auto_20181021_1347.py

@@ -0,0 +1,51 @@
# Generated by Django 2.1 on 2018-10-21 11:47

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('application', '0005_auto_20181019_1645'),
    ]

    operations = [
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('report_text', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='ReportRecipient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='ScheduledReport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=200)),
                ('last_run_at', models.DateTimeField(blank=True, null=True)),
                ('next_run_at', models.DateTimeField(blank=True, null=True)),
                ('cron_expression', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='ScheduledReportGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report', to='application.Report')),
                ('scheduled_report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relatedscheduledreport', to='application.ScheduledReport')),
            ],
        ),
        migrations.AddField(
            model_name='reportrecipient',
            name='scheduled_report',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reportrecep', to='application.ScheduledReport'),
        ),
    ]

+ 39
- 0
application/models.py

@@ -2,6 +2,9 @@ from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from taggit.managers import TaggableManager
from datetime import datetime
from croniter import croniter



class CustomUser(models.Model):
@@ -24,3 +27,39 @@ class Post(models.Model):

    def __str__(self):
        return self.title


class Report(models.Model):
    report_text = models.TextField()


class ScheduledReport(models.Model):
    """
    Contains the email subject and cron expression, to evaluate when the email has to be sent
    """
    subject = models.CharField(max_length=200)
    last_run_at = models.DateTimeField(null=True, blank=True)
    next_run_at = models.DateTimeField(null=True, blank=True)
    cron_expression = models.CharField(max_length=200)

    def save(self, *args, **kwargs):
        """
        Evaluates "next_run_at" from the cron expression, so that it is updated once the report is sent.
        """
        self.last_run_at = datetime.now()
        iter = croniter(self.cron_expression, self.last_run_at)
        self.next_run_at = iter.get_next(datetime)
        super(ScheduledReport, self).save(*args, **kwargs)

    def __str__(self):
        return self.subject


class ScheduledReportGroup(models.Model):
    """
    Many-to-many mapping between reports which will be sent out in a scheduled report
    """
    report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE)
    scheduled_report = models.ForeignKey(ScheduledReport,
                                         related_name='relatedscheduledreport', on_delete=models.CASCADE)


class ReportRecipient(models.Model):
    """
    Stores all the recipients of the given scheduled report
    """
    email = models.EmailField()
    scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE)
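
A minimal shell sketch, assuming the models above (subject, cron expression, and address are made-up values):

    # save() stamps last_run_at with now() and computes next_run_at
    # from the cron expression ('0 12 * * *' means daily at 12:00)
    report = ScheduledReport(subject='Weekly digest', cron_expression='0 12 * * *')
    report.save()
    ReportRecipient.objects.create(email='reader@example.com', scheduled_report=report)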

+ 7
- 0
application/tasks.py

@@ -0,0 +1,7 @@
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from .email_service import send_emails
# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab
@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*"))
def trigger_emails():
    send_emails()
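
periodic_task is the old Celery 3.x decorator style; the same schedule could instead be declared in settings through CELERYBEAT_SCHEDULE, a sketch (assuming trigger_emails is registered under its dotted module path):

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'trigger-emails-every-minute': {
            'task': 'application.tasks.trigger_emails',
            'schedule': crontab(hour='*', minute='*', day_of_week='*'),
        },
    }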

+ 17
- 16
application/templates/tag_list.html

@@ -1,23 +1,24 @@
{% extends "base.html" %} {% block content %} {% load taggit_templatetags2_tags %}
{% extends "base.html" %} {% block content %}


<div id="">
    Your tags:
    <ul>
        {% for tag in users %}
        <li>
        </li>
        {% endfor %}
    </ul>
</div>
<div>
    {{ u }} {{ arr }}
</div>
<div>
    from List: {% for tag in tags %}

    {% for tag in posts %}
    <p>{{ tag.name }} </p>{% endfor %}{% endfor %}
    {% for post in posts %}
    <div class="post">
        <div class="date">
            {{ post.published_date }}
        </div>
        <h1>
            <a href="{% url 'post_detail' pk=post.pk %}">{{ post.title }}</a>
        </h1>
        <p>{{ post.text|linebreaks }}</p>
        Tags: {% for tag in post.tags.all %}
        <a href="{% url 'post_list_by_tag' tag.slug %}">{{ tag.name }}</a>
        {% if not forloop.last %}, {% endif %} {% endfor %}
        <p>{{ post.author }}</p>
    </div>
    {% endfor %}
</div>
{% endblock %}

+ 7
- 5
application/views.py

@@ -146,13 +146,15 @@ def student_page(request):
@login_required
def tag_list(request):
    log = logging.getLogger('mysite')
    u = CustomUser.objects.get(user=request.user)
    log.info(u)
    tags = Tag.objects.filter(customuser__user=u)
    log.info(tags)
    u = User.objects.get(username=request.user)
    if u:
        tags_user = Tag.objects.filter(customuser__user=u)
        log.info(tags_user)
        arr = []
        for tag in tags_user:
            arr.append(str(tag))
            log.info(tag)
            posts = Post.objects.filter(tags__in=[tag]).order_by('-published_date')
    return render(request, 'tag_list.html', locals())

class TagSearch(TagCanvasListView):
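
In the loop above, posts is reassigned on every iteration, so the template only ever sees the last tag's posts. A sketch querying all of the user's tags at once (tags__name__in is standard django-taggit filtering; distinct() drops duplicates when a post carries several matching tags):

    posts = (Post.objects.filter(tags__name__in=arr)
             .distinct()
             .order_by('-published_date'))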

+ 1
- 0
croniter

@@ -0,0 +1 @@
Subproject commit 3273cbc777423138f9b6cfb127de503b63ccd22f

+ 177
- 0
log.txt

@@ -138,3 +138,180 @@
[21/Oct/2018 10:35:50] INFO [mysite:184] taggit
[21/Oct/2018 10:35:55] INFO [mysite:184]
[21/Oct/2018 10:38:17] INFO [mysite:184] None
[21/Oct/2018 12:59:59] INFO [mysite:184] None
[22/Oct/2018 14:31:54] INFO [mysite:184] None
[22/Oct/2018 14:32:03] INFO [mysite:150] CustomUser object (25)
[22/Oct/2018 14:40:08] INFO [mysite:150] CustomUser object (25)
[22/Oct/2018 14:43:18] INFO [mysite:150] CustomUser object (25)
[22/Oct/2018 14:43:31] INFO [mysite:150] CustomUser object (25)
[22/Oct/2018 14:50:32] INFO [mysite:150] esthi
[22/Oct/2018 14:50:32] INFO [mysite:152] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 14:51:10] INFO [mysite:150] stefan
[22/Oct/2018 14:51:10] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:56:36] INFO [mysite:150] stefan
[22/Oct/2018 14:56:36] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:56:56] INFO [mysite:150] stefan
[22/Oct/2018 14:56:56] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:57:15] INFO [mysite:150] stefan
[22/Oct/2018 14:57:15] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:57:39] INFO [mysite:150] stefan
[22/Oct/2018 14:57:39] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:58:53] INFO [mysite:150] stefan
[22/Oct/2018 14:58:53] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:59:01] INFO [mysite:150] stefan
[22/Oct/2018 14:59:01] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 14:59:55] INFO [mysite:150] stefan
[22/Oct/2018 14:59:55] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:02:16] INFO [mysite:150] stefan
[22/Oct/2018 15:02:16] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:03:09] INFO [mysite:150] stefan
[22/Oct/2018 15:03:09] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:03:41] INFO [mysite:150] stefan
[22/Oct/2018 15:03:41] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:05:17] INFO [mysite:184] None
[22/Oct/2018 15:05:21] INFO [mysite:184] hi
[22/Oct/2018 15:07:51] INFO [mysite:185] None
[22/Oct/2018 15:07:53] INFO [mysite:150] stefan
[22/Oct/2018 15:07:53] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:08:37] INFO [mysite:150] stefan
[22/Oct/2018 15:08:37] INFO [mysite:152] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:09:08] INFO [mysite:151] stefan
[22/Oct/2018 15:09:08] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:09:58] INFO [mysite:151] stefan
[22/Oct/2018 15:09:58] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:11:00] INFO [mysite:151] stefan
[22/Oct/2018 15:11:00] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:11:41] INFO [mysite:151] stefan
[22/Oct/2018 15:11:41] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:12:14] INFO [mysite:151] stefan
[22/Oct/2018 15:12:14] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 15:14:15] INFO [mysite:151] stefan
[22/Oct/2018 15:14:15] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 17:37:38] INFO [mysite:154] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 17:38:19] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:39:44] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:39:44] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:41:03] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:41:19] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:41:20] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:45:07] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:45:07] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:45:32] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:45:32] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:45:47] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:45:47] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:45:47] INFO [mysite:160] ['bamberg']
[22/Oct/2018 17:45:47] INFO [mysite:160] ['bamberg', 'test']
[22/Oct/2018 17:49:21] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:49:21] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:49:21] INFO [mysite:159] ['bamberg']
[22/Oct/2018 17:49:21] INFO [mysite:159] ['bamberg', 'test']
[22/Oct/2018 17:51:20] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:51:20] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:51:20] INFO [mysite:159] ['bamberg']
[22/Oct/2018 17:51:20] INFO [mysite:159] ['bamberg', 'test']
[22/Oct/2018 17:51:27] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:51:27] INFO [mysite:155] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:51:28] INFO [mysite:159] ['bamberg']
[22/Oct/2018 17:51:28] INFO [mysite:159] ['bamberg', 'test']
[22/Oct/2018 17:53:13] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:53:55] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:53:55] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:53:55] INFO [mysite:158] ['bamberg']
[22/Oct/2018 17:53:55] INFO [mysite:158] ['bamberg', 'test']
[22/Oct/2018 17:54:44] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:54:44] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:55:02] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:55:02] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:55:02] INFO [mysite:159] ['bamberg']
[22/Oct/2018 17:55:02] INFO [mysite:159] ['bamberg', 'test']
[22/Oct/2018 17:55:14] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:55:14] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:55:14] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 17:55:14] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 17:56:12] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:56:12] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:56:12] INFO [mysite:159] bamberg
[22/Oct/2018 17:56:12] INFO [mysite:159] test
[22/Oct/2018 17:56:34] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:56:34] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:56:34] INFO [mysite:159] bamberg
[22/Oct/2018 17:56:34] INFO [mysite:159] test
[22/Oct/2018 17:57:48] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:57:48] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:57:49] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 17:57:49] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 17:59:46] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:59:46] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 17:59:46] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 17:59:46] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:06] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:06] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:00:06] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:06] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:15] INFO [mysite:151] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:15] INFO [mysite:154] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:00:15] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:00:15] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:01:25] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:01:25] INFO [mysite:158] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:01:25] INFO [mysite:158] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:01:45] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:02:47] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:03:12] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:03:12] INFO [mysite:158] <QuerySet []>
[22/Oct/2018 18:03:12] INFO [mysite:158] <QuerySet []>
[22/Oct/2018 18:04:45] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:05:24] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:05:24] INFO [mysite:157] bamberg
[22/Oct/2018 18:05:24] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 18:05:24] INFO [mysite:157] test
[22/Oct/2018 18:05:24] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 18:13:35] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:13:35] INFO [mysite:157] bamberg
[22/Oct/2018 18:14:16] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:14:16] INFO [mysite:157] bamberg
[22/Oct/2018 18:19:16] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:19:16] INFO [mysite:157] bamberg
[22/Oct/2018 18:38:14] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:38:14] INFO [mysite:157] bamberg
[22/Oct/2018 18:41:50] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:41:50] INFO [mysite:157] bamberg
[22/Oct/2018 18:41:50] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:41:50] INFO [mysite:157] test
[22/Oct/2018 18:41:50] INFO [mysite:159] <QuerySet [<Post: second bla>, <Post: first post>, <Post: Third one>]>
[22/Oct/2018 18:45:55] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:45:55] INFO [mysite:157] bamberg
[22/Oct/2018 18:46:39] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:46:39] INFO [mysite:157] bamberg
[22/Oct/2018 18:46:39] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 18:46:39] INFO [mysite:157] test
[22/Oct/2018 18:46:39] INFO [mysite:159] <QuerySet [<Post: first post>, <Post: second bla>, <Post: Third one>]>
[22/Oct/2018 18:47:12] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:47:12] INFO [mysite:157] stefan
[22/Oct/2018 18:47:12] INFO [mysite:159] <QuerySet []>
[22/Oct/2018 18:48:22] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:48:22] INFO [mysite:157] stefan
[22/Oct/2018 18:48:22] INFO [mysite:159] <QuerySet [<Post: Crazy>]>
[22/Oct/2018 18:50:37] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:50:38] INFO [mysite:157] stefan
[22/Oct/2018 18:52:01] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:52:01] INFO [mysite:157] stefan
[22/Oct/2018 18:52:36] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:52:36] INFO [mysite:157] stefan
[22/Oct/2018 18:53:07] INFO [mysite:153] <QuerySet [<Tag: stefan>]>
[22/Oct/2018 18:53:07] INFO [mysite:157] stefan
[22/Oct/2018 18:53:20] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:53:20] INFO [mysite:157] bamberg
[22/Oct/2018 18:53:20] INFO [mysite:157] test
[22/Oct/2018 18:54:05] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:54:05] INFO [mysite:157] bamberg
[22/Oct/2018 18:54:20] INFO [mysite:153] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:54:21] INFO [mysite:157] bamberg
[22/Oct/2018 18:54:21] INFO [mysite:157] test
[22/Oct/2018 18:54:46] INFO [mysite:152] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:54:46] INFO [mysite:156] bamberg
[22/Oct/2018 18:54:46] INFO [mysite:156] test
[22/Oct/2018 18:55:43] INFO [mysite:152] <QuerySet [<Tag: bamberg>, <Tag: test>]>
[22/Oct/2018 18:55:43] INFO [mysite:156] bamberg
[22/Oct/2018 18:55:43] INFO [mysite:156] test

+ 14
- 2
mysite/settings.py

@@ -13,6 +13,7 @@ https://docs.djangoproject.com/en/2.0/ref/settings/
import os
import re
import socket
import djcelery

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -46,6 +47,8 @@ INSTALLED_APPS = [
'application',
'taggit',
'taggit_templatetags2',
'djcelery',
'kombu.transport.django',
]

MIDDLEWARE = [
@@ -171,7 +174,7 @@ else:
]
print(" --- Live stage --- ")

AUTH_PROFILE_MODULE = 'application.CustomUser'

#Log Configuration
LOGGING = {
@@ -249,4 +252,13 @@ if DEBUG:

DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
}

# Celery settings
BROKER_URL = 'django://'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
djcelery.setup_loader()
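
A quick sanity check that these settings reach the Celery app, runnable from "python manage.py shell" (assumes the application/__init__.py shim added in this commit):

    from application import celery_app

    print(celery_app.conf.BROKER_URL)            # expect 'django://'
    print(celery_app.conf.CELERYBEAT_SCHEDULER)  # expect the djcelery DatabaseScheduler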

+ 11
- 0
thesisenv/bin/celery

@@ -0,0 +1,11 @@
#!/Users/Esthi/thesis_ek/thesisenv/bin/python3

# -*- coding: utf-8 -*-
import re
import sys

from celery.__main__ import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

+ 11
- 0
thesisenv/bin/celerybeat

@@ -0,0 +1,11 @@
#!/Users/Esthi/thesis_ek/thesisenv/bin/python3

# -*- coding: utf-8 -*-
import re
import sys

from celery.__main__ import _compat_beat

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_compat_beat())

+ 11
- 0
thesisenv/bin/celeryd

@@ -0,0 +1,11 @@
#!/Users/Esthi/thesis_ek/thesisenv/bin/python3

# -*- coding: utf-8 -*-
import re
import sys

from celery.__main__ import _compat_worker

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_compat_worker())

+ 11
- 0
thesisenv/bin/celeryd-multi

@@ -0,0 +1,11 @@
#!/Users/Esthi/thesis_ek/thesisenv/bin/python3

# -*- coding: utf-8 -*-
import re
import sys

from celery.__main__ import _compat_multi

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_compat_multi())

+ 106
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst

@@ -0,0 +1,106 @@
=====================================================================
Python AMQP 0.9.1 client library
=====================================================================

:Version: 1.4.9
:Web: http://amqp.readthedocs.org/
:Download: http://pypi.python.org/pypi/amqp/
:Source: http://github.com/celery/py-amqp/
:Keywords: amqp, rabbitmq

About
=====

This is a fork of amqplib_ which was originally written by Barry Pederson.
It is maintained by the Celery_ project, and used by `kombu`_ as a pure python
alternative when `librabbitmq`_ is not available.

This library should be API compatible with `librabbitmq`_.

.. _amqplib: http://pypi.python.org/pypi/amqplib
.. _Celery: http://celeryproject.org/
.. _kombu: http://kombu.readthedocs.org/
.. _librabbitmq: http://pypi.python.org/pypi/librabbitmq

Differences from `amqplib`_
===========================

- Supports draining events from multiple channels (``Connection.drain_events``)
- Support for timeouts
- Channels are restored after channel error, instead of having to close the
connection.
- Support for heartbeats

- ``Connection.heartbeat_tick(rate=2)`` must called at regular intervals
(half of the heartbeat value if rate is 2).
- Or some other scheme by using ``Connection.send_heartbeat``.
- Supports RabbitMQ extensions:
- Consumer Cancel Notifications
- by default a cancel results in ``ChannelError`` being raised
- but not if a ``on_cancel`` callback is passed to ``basic_consume``.
- Publisher confirms
- ``Channel.confirm_select()`` enables publisher confirms.
- ``Channel.events['basic_ack'].append(my_callback)`` adds a callback
to be called when a message is confirmed. This callback is then
called with the signature ``(delivery_tag, multiple)``.
- Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``.
- ``Channel.confirm_select()`` enables publisher confirms.
- ``Channel.events['basic_ack'].append(my_callback)`` adds a callback
to be called when a message is confirmed. This callback is then
called with the signature ``(delivery_tag, multiple)``.
- Support for ``basic_return``
- Uses AMQP 0-9-1 instead of 0-8.
- ``Channel.access_request`` and ``ticket`` arguments to methods
**removed**.
- Supports the ``arguments`` argument to ``basic_consume``.
- ``internal`` argument to ``exchange_declare`` removed.
- ``auto_delete`` argument to ``exchange_declare`` deprecated
- ``insist`` argument to ``Connection`` removed.
- ``Channel.alerts`` has been removed.
- Support for ``Channel.basic_recover_async``.
- ``Channel.basic_recover`` deprecated.
- Exceptions renamed to have idiomatic names:
- ``AMQPException`` -> ``AMQPError``
- ``AMQPConnectionException`` -> ConnectionError``
- ``AMQPChannelException`` -> ChannelError``
- ``Connection.known_hosts`` removed.
- ``Connection`` no longer supports redirects.
- ``exchange`` argument to ``queue_bind`` can now be empty
to use the "default exchange".
- Adds ``Connection.is_alive`` that tries to detect
whether the connection can still be used.
- Adds ``Connection.connection_errors`` and ``.channel_errors``,
a list of recoverable errors.
- Exposes the underlying socket as ``Connection.sock``.
- Adds ``Channel.no_ack_consumers`` to keep track of consumer tags
that set the no_ack flag.
- Slightly better at error recovery

Further
=======

- Differences between AMQP 0.8 and 0.9.1

http://www.rabbitmq.com/amqp-0-8-to-0-9-1.html

- AMQP 0.9.1 Quick Reference

http://www.rabbitmq.com/amqp-0-9-1-quickref.html

- RabbitMQ Extensions

http://www.rabbitmq.com/extensions.html

- For more information about AMQP, visit

http://www.amqp.org

- For other Python client libraries see:

http://www.rabbitmq.com/devtools.html#python-dev

.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png
:alt: Bitdeli badge
:target: https://bitdeli.com/free



thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/INSTALLER → thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/INSTALLER


+ 130
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA

@@ -0,0 +1,130 @@
Metadata-Version: 2.0
Name: amqp
Version: 1.4.9
Summary: Low-level AMQP client for Python (fork of amqplib)
Home-page: http://github.com/celery/py-amqp
Author: Ask Solem
Author-email: pyamqp@celeryproject.org
License: LGPL
Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.0
Classifier: Programming Language :: Python :: 3.1
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent

=====================================================================
Python AMQP 0.9.1 client library
=====================================================================

:Version: 1.4.9
:Web: http://amqp.readthedocs.org/
:Download: http://pypi.python.org/pypi/amqp/
:Source: http://github.com/celery/py-amqp/
:Keywords: amqp, rabbitmq

About
=====

This is a fork of amqplib_ which was originally written by Barry Pederson.
It is maintained by the Celery_ project, and used by `kombu`_ as a pure python
alternative when `librabbitmq`_ is not available.

This library should be API compatible with `librabbitmq`_.

.. _amqplib: http://pypi.python.org/pypi/amqplib
.. _Celery: http://celeryproject.org/
.. _kombu: http://kombu.readthedocs.org/
.. _librabbitmq: http://pypi.python.org/pypi/librabbitmq

Differences from `amqplib`_
===========================

- Supports draining events from multiple channels (``Connection.drain_events``)
- Support for timeouts
- Channels are restored after channel error, instead of having to close the
connection.
- Support for heartbeats

- ``Connection.heartbeat_tick(rate=2)`` must called at regular intervals
(half of the heartbeat value if rate is 2).
- Or some other scheme by using ``Connection.send_heartbeat``.
- Supports RabbitMQ extensions:
- Consumer Cancel Notifications
- by default a cancel results in ``ChannelError`` being raised
- but not if a ``on_cancel`` callback is passed to ``basic_consume``.
- Publisher confirms
- ``Channel.confirm_select()`` enables publisher confirms.
- ``Channel.events['basic_ack'].append(my_callback)`` adds a callback
to be called when a message is confirmed. This callback is then
called with the signature ``(delivery_tag, multiple)``.
- Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``.
- ``Channel.confirm_select()`` enables publisher confirms.
- ``Channel.events['basic_ack'].append(my_callback)`` adds a callback
to be called when a message is confirmed. This callback is then
called with the signature ``(delivery_tag, multiple)``.
- Support for ``basic_return``
- Uses AMQP 0-9-1 instead of 0-8.
- ``Channel.access_request`` and ``ticket`` arguments to methods
**removed**.
- Supports the ``arguments`` argument to ``basic_consume``.
- ``internal`` argument to ``exchange_declare`` removed.
- ``auto_delete`` argument to ``exchange_declare`` deprecated
- ``insist`` argument to ``Connection`` removed.
- ``Channel.alerts`` has been removed.
- Support for ``Channel.basic_recover_async``.
- ``Channel.basic_recover`` deprecated.
- Exceptions renamed to have idiomatic names:
- ``AMQPException`` -> ``AMQPError``
- ``AMQPConnectionException`` -> ConnectionError``
- ``AMQPChannelException`` -> ChannelError``
- ``Connection.known_hosts`` removed.
- ``Connection`` no longer supports redirects.
- ``exchange`` argument to ``queue_bind`` can now be empty
to use the "default exchange".
- Adds ``Connection.is_alive`` that tries to detect
whether the connection can still be used.
- Adds ``Connection.connection_errors`` and ``.channel_errors``,
a list of recoverable errors.
- Exposes the underlying socket as ``Connection.sock``.
- Adds ``Channel.no_ack_consumers`` to keep track of consumer tags
that set the no_ack flag.
- Slightly better at error recovery

Further
=======

- Differences between AMQP 0.8 and 0.9.1

http://www.rabbitmq.com/amqp-0-8-to-0-9-1.html

- AMQP 0.9.1 Quick Reference

http://www.rabbitmq.com/amqp-0-9-1-quickref.html

- RabbitMQ Extensions

http://www.rabbitmq.com/extensions.html

- For more information about AMQP, visit

http://www.amqp.org

- For other Python client libraries see:

http://www.rabbitmq.com/devtools.html#python-dev

.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png
:alt: Bitdeli badge
:target: https://bitdeli.com/free



+ 37
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD

@@ -0,0 +1,37 @@
amqp/__init__.py,sha256=BeETUDbn4gfRRlDZLbYR87Baj4OfZhhhw7zPw8BcJ34,2126
amqp/abstract_channel.py,sha256=CzinrOqXSVnzVpH5Cdm_zYJaW929AGqeQfRw-PMX74s,3429
amqp/basic_message.py,sha256=owh1E_CBDfu99xKDD4nDFDRf5aHkoIjU3KH8iDd7PWM,3954
amqp/channel.py,sha256=xldCRKo4Jzri8ryLlaozKa5Vp7B-KIilDzfhXKyCjbE,84236
amqp/connection.py,sha256=SGqZ4aYMwpy8C8-WG2XZZ5Vsgxog7dDN57k2UTMV8ck,34235
amqp/exceptions.py,sha256=ywAWGUJbSDpcKpvLgddmu2j4N1nvLWeMtaJIdlZ8TyQ,6852
amqp/five.py,sha256=-KE33qs2B6f9N4PAby-zb6VqQu0UEPgKELjZl-8sx6E,5457
amqp/method_framing.py,sha256=wP9XRw3cL0WXLAC7DpdK2HseTikK3vVm20IS0VYzbTw,8051
amqp/protocol.py,sha256=luFIgRWsD0vy3pupwiSJBaxWvARKTOSm9DrHuAwzk60,310
amqp/serialization.py,sha256=exC7GNCivp4B_5bzui2a-Swlb1MGfKmqnmlB3Jc9xSs,16315
amqp/transport.py,sha256=jjZjSQYRfCmMa8Nba9E-NNLvjWkNix2HYFNJlF00KhQ,10020
amqp/utils.py,sha256=NaBiCf_ZllC7wVYZ0yAcd_uJcmi5UDRD_w3PGVAS9M4,2685
amqp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
amqp/tests/case.py,sha256=yLcb_0hCb74IuK2b57kP0z8eAEjKvhKALZoi-JSQUmY,1973
amqp/tests/test_channel.py,sha256=xZGBSd9UhfuxqWmXRHy_m1zCGQVguUDoVL00EXLUmVg,1087
amqp-1.4.9.dist-info/DESCRIPTION.rst,sha256=ayW656JUsSRXWw0dojo6CW7PJ6wHqWyd98YASJOnd2M,4028
amqp-1.4.9.dist-info/METADATA,sha256=4yS7juxlaSN_UIjVpa7sD3Of86aCk_2V98LEJgOm3IM,4994
amqp-1.4.9.dist-info/metadata.json,sha256=hIn8inTFt5lK65sSohqxv_qmsblygeJGtaIVh4tRYV4,1175
amqp-1.4.9.dist-info/RECORD,,
amqp-1.4.9.dist-info/top_level.txt,sha256=tWQNmFVhU4UtDgB6Yy2lKqRz7LtOrRcN8_bPFVcVVR8,5
amqp-1.4.9.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
amqp-1.4.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
amqp/tests/__pycache__/case.cpython-36.pyc,,
amqp/tests/__pycache__/test_channel.cpython-36.pyc,,
amqp/tests/__pycache__/__init__.cpython-36.pyc,,
amqp/__pycache__/abstract_channel.cpython-36.pyc,,
amqp/__pycache__/exceptions.cpython-36.pyc,,
amqp/__pycache__/connection.cpython-36.pyc,,
amqp/__pycache__/channel.cpython-36.pyc,,
amqp/__pycache__/five.cpython-36.pyc,,
amqp/__pycache__/basic_message.cpython-36.pyc,,
amqp/__pycache__/transport.cpython-36.pyc,,
amqp/__pycache__/utils.cpython-36.pyc,,
amqp/__pycache__/method_framing.cpython-36.pyc,,
amqp/__pycache__/serialization.cpython-36.pyc,,
amqp/__pycache__/__init__.cpython-36.pyc,,
amqp/__pycache__/protocol.cpython-36.pyc,,

+ 6
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.24.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any


+ 1
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json

@@ -0,0 +1 @@
{"license": "LGPL", "name": "amqp", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "test_requires": [{"requires": ["unittest2 (>=0.4.0)", "nose", "nose-cover3", "coverage (>=3.0)", "mock"]}], "summary": "Low-level AMQP client for Python (fork of amqplib)", "platform": "any", "version": "1.4.9", "extensions": {"python.details": {"project_urls": {"Home": "http://github.com/celery/py-amqp"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "pyamqp@celeryproject.org", "name": "Ask Solem"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent"]}

+ 1
- 0
thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt

@@ -0,0 +1 @@
amqp

+ 70
- 0
thesisenv/lib/python3.6/site-packages/amqp/__init__.py

@@ -0,0 +1,70 @@
"""Low-level AMQP client for Python (fork of amqplib)"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

VERSION = (1, 4, 9)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Barry Pederson'
__maintainer__ = 'Ask Solem'
__contact__ = 'pyamqp@celeryproject.org'
__homepage__ = 'http://github.com/celery/py-amqp'
__docformat__ = 'restructuredtext'

# -eof meta-

#
# Pull in the public items from the various sub-modules
#
from .basic_message import Message # noqa
from .channel import Channel # noqa
from .connection import Connection # noqa
from .exceptions import ( # noqa
AMQPError,
ConnectionError,
RecoverableConnectionError,
IrrecoverableConnectionError,
ChannelError,
RecoverableChannelError,
IrrecoverableChannelError,
ConsumerCancelled,
ContentTooLarge,
NoConsumers,
ConnectionForced,
InvalidPath,
AccessRefused,
NotFound,
ResourceLocked,
PreconditionFailed,
FrameError,
FrameSyntaxError,
InvalidCommand,
ChannelNotOpen,
UnexpectedFrame,
ResourceError,
NotAllowed,
AMQPNotImplementedError,
InternalError,
error_for_code,
__all__ as _all_exceptions,
)
from .utils import promise # noqa

__all__ = [
'Connection',
'Channel',
'Message',
] + _all_exceptions

+ 93
- 0
thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py

@@ -0,0 +1,93 @@
"""Code common to Connection and Channel objects."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

from .exceptions import AMQPNotImplementedError, RecoverableConnectionError
from .serialization import AMQPWriter

__all__ = ['AbstractChannel']


class AbstractChannel(object):
    """Superclass for both the Connection, which is treated
    as channel 0, and other user-created Channel objects.

    The subclasses must have a _METHOD_MAP class property, mapping
    between AMQP method signatures and Python methods.

    """
    def __init__(self, connection, channel_id):
        self.connection = connection
        self.channel_id = channel_id
        connection.channels[channel_id] = self
        self.method_queue = []  # Higher level queue for methods
        self.auto_decode = False

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def _send_method(self, method_sig, args=bytes(), content=None):
        """Send a method for our channel."""
        conn = self.connection
        if conn is None:
            raise RecoverableConnectionError('connection already closed')

        if isinstance(args, AMQPWriter):
            args = args.getvalue()

        conn.method_writer.write_method(
            self.channel_id, method_sig, args, content,
        )

    def close(self):
        """Close this Channel or Connection"""
        raise NotImplementedError('Must be overriden in subclass')

    def wait(self, allowed_methods=None, timeout=None):
        """Wait for a method that matches our allowed_methods parameter (the
        default value of None means match any method), and dispatch to it."""
        method_sig, args, content = self.connection._wait_method(
            self.channel_id, allowed_methods, timeout)

        return self.dispatch_method(method_sig, args, content)

    def dispatch_method(self, method_sig, args, content):
        if content and \
                self.auto_decode and \
                hasattr(content, 'content_encoding'):
            try:
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass

        try:
            amqp_method = self._METHOD_MAP[method_sig]
        except KeyError:
            raise AMQPNotImplementedError(
                'Unknown AMQP method {0!r}'.format(method_sig))

        if content is None:
            return amqp_method(self, args)
        else:
            return amqp_method(self, args, content)

    #: Placeholder, the concrete implementations will have to
    #: supply their own versions of _METHOD_MAP
    _METHOD_MAP = {}

+ 124
- 0
thesisenv/lib/python3.6/site-packages/amqp/basic_message.py

@@ -0,0 +1,124 @@
"""Messages for AMQP"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

from .serialization import GenericContent

__all__ = ['Message']


class Message(GenericContent):
    """A Message for use with the Channnel.basic_* methods."""

    #: Instances of this class have these attributes, which
    #: are passed back and forth as message properties between
    #: client and server
    PROPERTIES = [
        ('content_type', 'shortstr'),
        ('content_encoding', 'shortstr'),
        ('application_headers', 'table'),
        ('delivery_mode', 'octet'),
        ('priority', 'octet'),
        ('correlation_id', 'shortstr'),
        ('reply_to', 'shortstr'),
        ('expiration', 'shortstr'),
        ('message_id', 'shortstr'),
        ('timestamp', 'timestamp'),
        ('type', 'shortstr'),
        ('user_id', 'shortstr'),
        ('app_id', 'shortstr'),
        ('cluster_id', 'shortstr')
    ]

    def __init__(self, body='', children=None, channel=None, **properties):
        """Expected arg types

            body: string
            children: (not supported)

        Keyword properties may include:

            content_type: shortstr
                MIME content type

            content_encoding: shortstr
                MIME content encoding

            application_headers: table
                Message header field table, a dict with string keys,
                and string | int | Decimal | datetime | dict values.

            delivery_mode: octet
                Non-persistent (1) or persistent (2)

            priority: octet
                The message priority, 0 to 9

            correlation_id: shortstr
                The application correlation identifier

            reply_to: shortstr
                The destination to reply to

            expiration: shortstr
                Message expiration specification

            message_id: shortstr
                The application message identifier

            timestamp: datetime.datetime
                The message timestamp

            type: shortstr
                The message type name

            user_id: shortstr
                The creating user id

            app_id: shortstr
                The creating application id

            cluster_id: shortstr
                Intra-cluster routing identifier

        Unicode bodies are encoded according to the 'content_encoding'
        argument. If that's None, it's set to 'UTF-8' automatically.

        example::

            msg = Message('hello world',
                          content_type='text/plain',
                          application_headers={'foo': 7})

        """
        super(Message, self).__init__(**properties)
        self.body = body
        self.channel = channel

    def __eq__(self, other):
        """Check if the properties and bodies of this Message and another
        Message are the same.

        Received messages may contain a 'delivery_info' attribute,
        which isn't compared.

        """
        try:
            return (super(Message, self).__eq__(other) and
                    self.body == other.body)
        except AttributeError:
            return NotImplemented

+ 2550
- 0
thesisenv/lib/python3.6/site-packages/amqp/channel.py
File diff suppressed because it is too large


+ 1008
- 0
thesisenv/lib/python3.6/site-packages/amqp/connection.py
File diff suppressed because it is too large


+ 262
- 0
thesisenv/lib/python3.6/site-packages/amqp/exceptions.py

@@ -0,0 +1,262 @@
"""Exceptions used by amqp"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

from struct import pack, unpack

__all__ = [
'AMQPError',
'ConnectionError', 'ChannelError',
'RecoverableConnectionError', 'IrrecoverableConnectionError',
'RecoverableChannelError', 'IrrecoverableChannelError',
'ConsumerCancelled', 'ContentTooLarge', 'NoConsumers',
'ConnectionForced', 'InvalidPath', 'AccessRefused', 'NotFound',
'ResourceLocked', 'PreconditionFailed', 'FrameError', 'FrameSyntaxError',
'InvalidCommand', 'ChannelNotOpen', 'UnexpectedFrame', 'ResourceError',
'NotConfirmed', 'NotAllowed', 'AMQPNotImplementedError', 'InternalError',
]


class AMQPError(Exception):
code = 0

def __init__(self, reply_text=None, method_sig=None,
method_name=None, reply_code=None):
self.message = reply_text
self.reply_code = reply_code or self.code
self.reply_text = reply_text
self.method_sig = method_sig
self.method_name = method_name or ''
if method_sig and not self.method_name:
self.method_name = METHOD_NAME_MAP.get(method_sig, '')
Exception.__init__(self, reply_code,
reply_text, method_sig, self.method_name)

def __str__(self):
if self.method:
return '{0.method}: ({0.reply_code}) {0.reply_text}'.format(self)
return self.reply_text or '<AMQPError: unknown error>'

@property
def method(self):
return self.method_name or self.method_sig


class ConnectionError(AMQPError):
pass


class ChannelError(AMQPError):
pass


class RecoverableChannelError(ChannelError):
pass


class IrrecoverableChannelError(ChannelError):
pass


class RecoverableConnectionError(ConnectionError):
pass


class IrrecoverableConnectionError(ConnectionError):
pass


class Blocked(RecoverableConnectionError):
pass


class ConsumerCancelled(RecoverableConnectionError):
pass


class ContentTooLarge(RecoverableChannelError):
code = 311


class NoConsumers(RecoverableChannelError):
code = 313


class ConnectionForced(RecoverableConnectionError):
code = 320


class InvalidPath(IrrecoverableConnectionError):
code = 402


class AccessRefused(IrrecoverableChannelError):
code = 403


class NotFound(IrrecoverableChannelError):
code = 404


class NotConfirmed(RecoverableConnectionError):
pass


class ResourceLocked(RecoverableChannelError):
code = 405


class PreconditionFailed(IrrecoverableChannelError):
code = 406


class FrameError(IrrecoverableConnectionError):
code = 501


class FrameSyntaxError(IrrecoverableConnectionError):
code = 502


class InvalidCommand(IrrecoverableConnectionError):
code = 503


class ChannelNotOpen(IrrecoverableConnectionError):
code = 504


class UnexpectedFrame(IrrecoverableConnectionError):
code = 505


class ResourceError(RecoverableConnectionError):
code = 506


class NotAllowed(IrrecoverableConnectionError):
code = 530


class AMQPNotImplementedError(IrrecoverableConnectionError):
code = 540


class InternalError(IrrecoverableConnectionError):
code = 541


ERROR_MAP = {
311: ContentTooLarge,
313: NoConsumers,
320: ConnectionForced,
402: InvalidPath,
403: AccessRefused,
404: NotFound,
405: ResourceLocked,
406: PreconditionFailed,
501: FrameError,
502: FrameSyntaxError,
503: InvalidCommand,
504: ChannelNotOpen,
505: UnexpectedFrame,
506: ResourceError,
530: NotAllowed,
540: AMQPNotImplementedError,
541: InternalError,
}


def error_for_code(code, text, method, default):
try:
return ERROR_MAP[code](text, method, reply_code=code)
except KeyError:
return default(text, method, reply_code=code)


def raise_for_code(code, text, method, default):
raise error_for_code(code, text, method, default)


METHOD_NAME_MAP = {
(10, 10): 'Connection.start',
(10, 11): 'Connection.start_ok',
(10, 20): 'Connection.secure',
(10, 21): 'Connection.secure_ok',
(10, 30): 'Connection.tune',
(10, 31): 'Connection.tune_ok',
(10, 40): 'Connection.open',
(10, 41): 'Connection.open_ok',
(10, 50): 'Connection.close',
(10, 51): 'Connection.close_ok',
(20, 10): 'Channel.open',
(20, 11): 'Channel.open_ok',
(20, 20): 'Channel.flow',
(20, 21): 'Channel.flow_ok',
(20, 40): 'Channel.close',
(20, 41): 'Channel.close_ok',
(30, 10): 'Access.request',
(30, 11): 'Access.request_ok',
(40, 10): 'Exchange.declare',
(40, 11): 'Exchange.declare_ok',
(40, 20): 'Exchange.delete',
(40, 21): 'Exchange.delete_ok',
(40, 30): 'Exchange.bind',
(40, 31): 'Exchange.bind_ok',
(40, 40): 'Exchange.unbind',
(40, 41): 'Exchange.unbind_ok',
(50, 10): 'Queue.declare',
(50, 11): 'Queue.declare_ok',
(50, 20): 'Queue.bind',
(50, 21): 'Queue.bind_ok',
(50, 30): 'Queue.purge',
(50, 31): 'Queue.purge_ok',
(50, 40): 'Queue.delete',
(50, 41): 'Queue.delete_ok',
(50, 50): 'Queue.unbind',
(50, 51): 'Queue.unbind_ok',
(60, 10): 'Basic.qos',
(60, 11): 'Basic.qos_ok',
(60, 20): 'Basic.consume',
(60, 21): 'Basic.consume_ok',
(60, 30): 'Basic.cancel',
(60, 31): 'Basic.cancel_ok',
(60, 40): 'Basic.publish',
(60, 50): 'Basic.return',
(60, 60): 'Basic.deliver',
(60, 70): 'Basic.get',
(60, 71): 'Basic.get_ok',
(60, 72): 'Basic.get_empty',
(60, 80): 'Basic.ack',
(60, 90): 'Basic.reject',
(60, 100): 'Basic.recover_async',
(60, 110): 'Basic.recover',
(60, 111): 'Basic.recover_ok',
(60, 120): 'Basic.nack',
(90, 10): 'Tx.select',
(90, 11): 'Tx.select_ok',
(90, 20): 'Tx.commit',
(90, 21): 'Tx.commit_ok',
(90, 30): 'Tx.rollback',
(90, 31): 'Tx.rollback_ok',
(85, 10): 'Confirm.select',
(85, 11): 'Confirm.select_ok',
}


for _method_id, _method_name in list(METHOD_NAME_MAP.items()):
METHOD_NAME_MAP[unpack('>I', pack('>HH', *_method_id))[0]] = _method_name
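
A short sketch of how the pieces above fit together: a broker reply code is resolved to a concrete exception class via ERROR_MAP, and METHOD_NAME_MAP turns the offending method signature into a readable name (the reply texts below are hypothetical):

    from amqp.exceptions import ChannelError, NotFound, error_for_code

    exc = error_for_code(404, 'no queue named foo', (50, 10), ChannelError)
    assert isinstance(exc, NotFound)
    print(exc)    # Queue.declare: (404) no queue named foo

    # Unknown codes fall back to the supplied default class:
    exc = error_for_code(599, 'mystery', (50, 10), ChannelError)
    assert type(exc) is ChannelError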

+ 191
- 0
thesisenv/lib/python3.6/site-packages/amqp/five.py

@@ -0,0 +1,191 @@
# -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~

Compatibility implementations of features
only available in newer Python versions.


"""
from __future__ import absolute_import

# ############# py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3

try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa

try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa

try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa


if PY3:
import builtins

from queue import Queue, Empty
from itertools import zip_longest
from io import StringIO, BytesIO

map = map
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int, )

open_fqdn = 'builtins.open'

def items(d):
return d.items()

def keys(d):
return d.keys()

def values(d):
return d.values()

def nextfun(it):
return it.__next__

exec_ = getattr(builtins, 'exec')

def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value

class WhateverIO(StringIO):

        def write(self, data):
            # accept bytes on Python 3 by decoding to text first
            if isinstance(data, bytes):
                data = data.decode()
            StringIO.write(self, data)

else:
import __builtin__ as builtins # noqa
from Queue import Queue, Empty # noqa
from itertools import imap as map, izip_longest as zip_longest # noqa
from StringIO import StringIO # noqa
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode
long_t = long # noqa
range = xrange
int_types = (int, long)

open_fqdn = '__builtin__.open'

def items(d): # noqa
return d.iteritems()

def keys(d): # noqa
return d.iterkeys()

def values(d): # noqa
return d.itervalues()

def nextfun(it): # noqa
return it.next

def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")

exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")

BytesIO = WhateverIO = StringIO # noqa


def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""Class decorator to set metaclass.

    Works with both Python 2 and Python 3, and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).

"""

def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)

return _clone_with_metaclass

# ############# time.monotonic ################################################

if sys.version_info < (3, 3):

import platform
SYSTEM = platform.system()

try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa

if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]

def _monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9

elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import os

CLOCK_MONOTONIC = 1 # see <linux/time.h>

class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]

librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]

def _monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else:
from time import time as _monotonic
try:
from time import monotonic
except ImportError:
monotonic = _monotonic # noqa
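
Of the helpers above, ``with_metaclass`` is the least obvious; a minimal sketch of its intended use (the metaclass here is made up for illustration):

    from amqp.five import with_metaclass

    class Meta(type):
        def __new__(mcs, name, bases, attrs):
            attrs['tagged'] = True
            return super(Meta, mcs).__new__(mcs, name, bases, attrs)

    @with_metaclass(Meta)
    class Thing(object):
        pass

    assert Thing.tagged            # attribute injected by the metaclass
    assert type(Thing) is Meta     # the decorated class was rebuilt, not subclassed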

+ 231
- 0
thesisenv/lib/python3.6/site-packages/amqp/method_framing.py

@@ -0,0 +1,231 @@
"""Convert between frames and higher-level AMQP methods"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

from collections import defaultdict, deque
from struct import pack, unpack

from .basic_message import Message
from .exceptions import AMQPError, UnexpectedFrame
from .five import range, string
from .serialization import AMQPReader

__all__ = ['MethodReader']

#
# MethodReader needs to know which methods are supposed
# to be followed by content headers and bodies.
#
_CONTENT_METHODS = [
(60, 50), # Basic.return
(60, 60), # Basic.deliver
(60, 71), # Basic.get_ok
]


class _PartialMessage(object):
"""Helper class to build up a multi-frame method."""

def __init__(self, method_sig, args, channel):
self.method_sig = method_sig
self.args = args
self.msg = Message()
self.body_parts = []
self.body_received = 0
self.body_size = None
self.complete = False

def add_header(self, payload):
class_id, weight, self.body_size = unpack('>HHQ', payload[:12])
self.msg._load_properties(payload[12:])
self.complete = (self.body_size == 0)

def add_payload(self, payload):
parts = self.body_parts
self.body_received += len(payload)
if self.body_received == self.body_size:
if parts:
parts.append(payload)
self.msg.body = bytes().join(parts)
else:
self.msg.body = payload
self.complete = True
else:
parts.append(payload)


class MethodReader(object):
"""Helper class to receive frames from the broker, combine them if
necessary with content-headers and content-bodies into complete methods.

Normally a method is represented as a tuple containing
(channel, method_sig, args, content).

In the case of a framing error, an :exc:`ConnectionError` is placed
in the queue.

In the case of unexpected frames, a tuple made up of
``(channel, ChannelError)`` is placed in the queue.

"""

def __init__(self, source):
self.source = source
self.queue = deque()
self.running = False
self.partial_messages = {}
self.heartbeats = 0
# For each channel, which type is expected next
self.expected_types = defaultdict(lambda: 1)
        # not an actual byte count, just incremented whenever we receive a frame
self.bytes_recv = 0
self._quick_put = self.queue.append
self._quick_get = self.queue.popleft

def _next_method(self):
"""Read the next method from the source, once one complete method has
been assembled it is placed in the internal queue."""
queue = self.queue
put = self._quick_put
read_frame = self.source.read_frame
while not queue:
try:
frame_type, channel, payload = read_frame()
except Exception as exc:
#
# Connection was closed? Framing Error?
#
put(exc)
break

self.bytes_recv += 1

if frame_type not in (self.expected_types[channel], 8):
put((
channel,
UnexpectedFrame(
'Received frame {0} while expecting type: {1}'.format(
frame_type, self.expected_types[channel]))))
elif frame_type == 1:
self._process_method_frame(channel, payload)
elif frame_type == 2:
self._process_content_header(channel, payload)
elif frame_type == 3:
self._process_content_body(channel, payload)
elif frame_type == 8:
self._process_heartbeat(channel, payload)

def _process_heartbeat(self, channel, payload):
self.heartbeats += 1

def _process_method_frame(self, channel, payload):
"""Process Method frames"""
method_sig = unpack('>HH', payload[:4])
args = AMQPReader(payload[4:])

if method_sig in _CONTENT_METHODS:
#
# Save what we've got so far and wait for the content-header
#
self.partial_messages[channel] = _PartialMessage(
method_sig, args, channel,
)
self.expected_types[channel] = 2
else:
self._quick_put((channel, method_sig, args, None))

def _process_content_header(self, channel, payload):
"""Process Content Header frames"""
partial = self.partial_messages[channel]
partial.add_header(payload)

if partial.complete:
#
# a bodyless message, we're done
#
self._quick_put((channel, partial.method_sig,
partial.args, partial.msg))
self.partial_messages.pop(channel, None)
self.expected_types[channel] = 1
else:
#
# wait for the content-body
#
self.expected_types[channel] = 3

def _process_content_body(self, channel, payload):
"""Process Content Body frames"""
partial = self.partial_messages[channel]
partial.add_payload(payload)
if partial.complete:
#
# Stick the message in the queue and go back to
# waiting for method frames
#
self._quick_put((channel, partial.method_sig,
partial.args, partial.msg))
self.partial_messages.pop(channel, None)
self.expected_types[channel] = 1

def read_method(self):
"""Read a method from the peer."""
self._next_method()
m = self._quick_get()
if isinstance(m, Exception):
raise m
if isinstance(m, tuple) and isinstance(m[1], AMQPError):
raise m[1]
return m


class MethodWriter(object):
"""Convert AMQP methods into AMQP frames and send them out
to the peer."""

def __init__(self, dest, frame_max):
self.dest = dest
self.frame_max = frame_max
self.bytes_sent = 0

def write_method(self, channel, method_sig, args, content=None):
write_frame = self.dest.write_frame
payload = pack('>HH', method_sig[0], method_sig[1]) + args

if content:
# do this early, so we can raise an exception if there's a
# problem with the content properties, before sending the
# first frame
body = content.body
if isinstance(body, string):
coding = content.properties.get('content_encoding', None)
if coding is None:
coding = content.properties['content_encoding'] = 'UTF-8'

body = body.encode(coding)
properties = content._serialize_properties()

write_frame(1, channel, payload)

if content:
payload = pack('>HHQ', method_sig[0], 0, len(body)) + properties

write_frame(2, channel, payload)

chunk_size = self.frame_max - 8
for i in range(0, len(body), chunk_size):
write_frame(3, channel, body[i:i + chunk_size])
self.bytes_sent += 1
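
The chunking loop at the end of ``write_method`` is the wire-level reason large messages become several body frames: each frame carries at most ``frame_max - 8`` bytes of payload. A standalone sketch of that arithmetic (the numbers are hypothetical):

    frame_max = 131072                 # as negotiated during Connection.tune
    body = b'x' * 300000               # hypothetical 300 kB message body
    chunk_size = frame_max - 8

    frames = [body[i:i + chunk_size] for i in range(0, len(body), chunk_size)]
    print(len(frames))                 # 3 body frames for this message
    assert b''.join(frames) == body    # reassembly is what _PartialMessage does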

+ 13
- 0
thesisenv/lib/python3.6/site-packages/amqp/protocol.py

@@ -0,0 +1,13 @@
from __future__ import absolute_import

from collections import namedtuple


queue_declare_ok_t = namedtuple(
'queue_declare_ok_t', ('queue', 'message_count', 'consumer_count'),
)

basic_return_t = namedtuple(
'basic_return_t',
('reply_code', 'reply_text', 'exchange', 'routing_key', 'message'),
)
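
These namedtuples are plain value carriers; for example, the result of a successful queue declaration can be read by field name or unpacked positionally (values invented for illustration):

    from amqp.protocol import queue_declare_ok_t

    ok = queue_declare_ok_t('celery', 12, 1)
    print(ok.queue, ok.message_count, ok.consumer_count)   # celery 12 1
    queue, messages, consumers = ok                        # tuple unpacking also works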

+ 509
- 0
thesisenv/lib/python3.6/site-packages/amqp/serialization.py

@@ -0,0 +1,509 @@
"""
Convert between bytestreams and higher-level AMQP types.

2007-11-05 Barry Pederson <bp@barryp.org>

"""
# Copyright (C) 2007 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

import calendar
import sys

from datetime import datetime
from decimal import Decimal
from io import BytesIO
from struct import pack, unpack

from .exceptions import FrameSyntaxError
from .five import int_types, long_t, string, string_t, items

IS_PY3K = sys.version_info[0] >= 3

if IS_PY3K:
def byte(n):
return bytes([n])
else:
byte = chr


ILLEGAL_TABLE_TYPE_WITH_KEY = """\
Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}]
"""

ILLEGAL_TABLE_TYPE = """\
Table type {0!r} not handled by amqp. [value: {1!r}]
"""


class AMQPReader(object):
"""Read higher-level AMQP types from a bytestream."""
def __init__(self, source):
"""Source should be either a file-like object with a read() method, or
a plain (non-unicode) string."""
if isinstance(source, bytes):
self.input = BytesIO(source)
elif hasattr(source, 'read'):
self.input = source
else:
raise ValueError(
'AMQPReader needs a file-like object or plain string')

self.bitcount = self.bits = 0

def close(self):
self.input.close()

def read(self, n):
"""Read n bytes."""
self.bitcount = self.bits = 0
return self.input.read(n)

def read_bit(self):
"""Read a single boolean value."""
if not self.bitcount:
self.bits = ord(self.input.read(1))
self.bitcount = 8
result = (self.bits & 1) == 1
self.bits >>= 1
self.bitcount -= 1
return result

def read_octet(self):
"""Read one byte, return as an integer"""
self.bitcount = self.bits = 0
return unpack('B', self.input.read(1))[0]

def read_short(self):
"""Read an unsigned 16-bit integer"""
self.bitcount = self.bits = 0
return unpack('>H', self.input.read(2))[0]

def read_long(self):
"""Read an unsigned 32-bit integer"""
self.bitcount = self.bits = 0
return unpack('>I', self.input.read(4))[0]

def read_longlong(self):
"""Read an unsigned 64-bit integer"""
self.bitcount = self.bits = 0
return unpack('>Q', self.input.read(8))[0]

def read_float(self):
"""Read float value."""
self.bitcount = self.bits = 0
return unpack('>d', self.input.read(8))[0]

def read_shortstr(self):
"""Read a short string that's stored in up to 255 bytes.

The encoding isn't specified in the AMQP spec, so
assume it's utf-8

"""
self.bitcount = self.bits = 0
slen = unpack('B', self.input.read(1))[0]
return self.input.read(slen).decode('utf-8')

def read_longstr(self):
"""Read a string that's up to 2**32 bytes.

The encoding isn't specified in the AMQP spec, so
assume it's utf-8

"""
self.bitcount = self.bits = 0
slen = unpack('>I', self.input.read(4))[0]
return self.input.read(slen).decode('utf-8')

def read_table(self):
"""Read an AMQP table, and return as a Python dictionary."""
self.bitcount = self.bits = 0
tlen = unpack('>I', self.input.read(4))[0]
table_data = AMQPReader(self.input.read(tlen))
result = {}
while table_data.input.tell() < tlen:
name = table_data.read_shortstr()
val = table_data.read_item()
result[name] = val
return result

def read_item(self):
ftype = ord(self.input.read(1))

# 'S': long string
if ftype == 83:
val = self.read_longstr()
# 's': short string
elif ftype == 115:
val = self.read_shortstr()
        # 'b': short-short int
        elif ftype == 98:
            val, = unpack('>b', self.input.read(1))
        # 'B': short-short unsigned int
        elif ftype == 66:
            val, = unpack('>B', self.input.read(1))
# 'U': short int
elif ftype == 85:
val, = unpack('>h', self.input.read(2))
# 'u': short unsigned int
elif ftype == 117:
val, = unpack('>H', self.input.read(2))
# 'I': long int
elif ftype == 73:
val, = unpack('>i', self.input.read(4))
# 'i': long unsigned int
        elif ftype == 105:
val, = unpack('>I', self.input.read(4))
# 'L': long long int
elif ftype == 76:
val, = unpack('>q', self.input.read(8))
# 'l': long long unsigned int
elif ftype == 108:
val, = unpack('>Q', self.input.read(8))
# 'f': float
elif ftype == 102:
val, = unpack('>f', self.input.read(4))
# 'd': double
elif ftype == 100:
val = self.read_float()
# 'D': decimal
elif ftype == 68:
d = self.read_octet()
n, = unpack('>i', self.input.read(4))
val = Decimal(n) / Decimal(10 ** d)
# 'F': table
elif ftype == 70:
val = self.read_table() # recurse
# 'A': array
elif ftype == 65:
val = self.read_array()
# 't' (bool)
elif ftype == 116:
val = self.read_bit()
# 'T': timestamp
elif ftype == 84:
val = self.read_timestamp()
# 'V': void
elif ftype == 86:
val = None
else:
raise FrameSyntaxError(
'Unknown value in table: {0!r} ({1!r})'.format(
ftype, type(ftype)))
return val

def read_array(self):
array_length = unpack('>I', self.input.read(4))[0]
array_data = AMQPReader(self.input.read(array_length))
result = []
while array_data.input.tell() < array_length:
val = array_data.read_item()
result.append(val)
return result

    def read_timestamp(self):
        """Read an AMQP timestamp, which is a 64-bit integer representing
        seconds since the Unix epoch in 1-second resolution.

        Return as a naive Python datetime.datetime object,
        expressed in UTC.

        """
        return datetime.utcfromtimestamp(self.read_longlong())


class AMQPWriter(object):
"""Convert higher-level AMQP types to bytestreams."""

def __init__(self, dest=None):
"""dest may be a file-type object (with a write() method). If None
then a BytesIO is created, and the contents can be accessed with
this class's getvalue() method."""
self.out = BytesIO() if dest is None else dest
self.bits = []
self.bitcount = 0

def _flushbits(self):
if self.bits:
out = self.out
for b in self.bits:
out.write(pack('B', b))
self.bits = []
self.bitcount = 0

def close(self):
"""Pass through if possible to any file-like destinations."""
try:
self.out.close()
except AttributeError:
pass

def flush(self):
"""Pass through if possible to any file-like destinations."""
try:
self.out.flush()
except AttributeError:
pass

def getvalue(self):
"""Get what's been encoded so far if we're working with a BytesIO."""
self._flushbits()
return self.out.getvalue()

def write(self, s):
"""Write a plain Python string with no special encoding in Python 2.x,
or bytes in Python 3.x"""
self._flushbits()
self.out.write(s)

def write_bit(self, b):
"""Write a boolean value."""
b = 1 if b else 0
shift = self.bitcount % 8
if shift == 0:
self.bits.append(0)
self.bits[-1] |= (b << shift)
self.bitcount += 1

def write_octet(self, n):
"""Write an integer as an unsigned 8-bit value."""
if n < 0 or n > 255:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..255'.format(n))
self._flushbits()
self.out.write(pack('B', n))

    def write_short(self, n):
        """Write an integer as an unsigned 16-bit value."""
        if n < 0 or n > 65535:
            raise FrameSyntaxError(
                'Value {0!r} out of range 0..65535'.format(n))
        self._flushbits()
        self.out.write(pack('>H', int(n)))

    def write_long(self, n):
        """Write an integer as an unsigned 32-bit value."""
        if n < 0 or n >= 4294967296:
            raise FrameSyntaxError(
                'Value {0!r} out of range 0..2**32-1'.format(n))
        self._flushbits()
        self.out.write(pack('>I', n))

def write_longlong(self, n):
"""Write an integer as an unsigned 64-bit value."""
if n < 0 or n >= 18446744073709551616:
raise FrameSyntaxError(
'Octet {0!r} out of range 0..2**64-1'.format(n))
self._flushbits()
self.out.write(pack('>Q', n))

def write_shortstr(self, s):
"""Write a string up to 255 bytes long (after any encoding).

If passed a unicode string, encode with UTF-8.

"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
if len(s) > 255:
raise FrameSyntaxError(
'Shortstring overflow ({0} > 255)'.format(len(s)))
self.write_octet(len(s))
self.out.write(s)

def write_longstr(self, s):
"""Write a string up to 2**32 bytes long after encoding.

If passed a unicode string, encode as UTF-8.

"""
self._flushbits()
if isinstance(s, string):
s = s.encode('utf-8')
self.write_long(len(s))
self.out.write(s)

def write_table(self, d):
"""Write out a Python dictionary made of up string keys, and values
that are strings, signed integers, Decimal, datetime.datetime, or
sub-dictionaries following the same constraints."""
self._flushbits()
table_data = AMQPWriter()
for k, v in items(d):
table_data.write_shortstr(k)
table_data.write_item(v, k)
table_data = table_data.getvalue()
self.write_long(len(table_data))
self.out.write(table_data)

def write_item(self, v, k=None):
if isinstance(v, (string_t, bytes)):
if isinstance(v, string):
v = v.encode('utf-8')
self.write(b'S')
self.write_longstr(v)
elif isinstance(v, bool):
self.write(pack('>cB', b't', int(v)))
elif isinstance(v, float):
self.write(pack('>cd', b'd', v))
elif isinstance(v, int_types):
self.write(pack('>ci', b'I', v))
elif isinstance(v, Decimal):
self.write(b'D')
sign, digits, exponent = v.as_tuple()
v = 0
for d in digits:
v = (v * 10) + d
if sign:
v = -v
self.write_octet(-exponent)
self.write(pack('>i', v))
elif isinstance(v, datetime):
self.write(b'T')
self.write_timestamp(v)
elif isinstance(v, dict):
self.write(b'F')
self.write_table(v)
elif isinstance(v, (list, tuple)):
self.write(b'A')
self.write_array(v)
elif v is None:
self.write(b'V')
else:
err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k
else ILLEGAL_TABLE_TYPE.format(type(v), v))
raise FrameSyntaxError(err)

def write_array(self, a):
array_data = AMQPWriter()
for v in a:
array_data.write_item(v)
array_data = array_data.getvalue()
self.write_long(len(array_data))
self.out.write(array_data)

def write_timestamp(self, v):
"""Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch."""
self.out.write(pack('>Q', long_t(calendar.timegm(v.utctimetuple()))))


class GenericContent(object):
"""Abstract base class for AMQP content.

Subclasses should override the PROPERTIES attribute.

"""
PROPERTIES = [('dummy', 'shortstr')]

def __init__(self, **props):
"""Save the properties appropriate to this AMQP content type
in a 'properties' dictionary."""
d = {}
for propname, _ in self.PROPERTIES:
if propname in props:
d[propname] = props[propname]
# FIXME: should we ignore unknown properties?

self.properties = d

def __eq__(self, other):
"""Check if this object has the same properties as another
content object."""
try:
return self.properties == other.properties
except AttributeError:
return NotImplemented

def __getattr__(self, name):
"""Look for additional properties in the 'properties'
dictionary, and if present - the 'delivery_info'
dictionary."""
if name == '__setstate__':
# Allows pickling/unpickling to work
raise AttributeError('__setstate__')

if name in self.properties:
return self.properties[name]

if 'delivery_info' in self.__dict__ \
and name in self.delivery_info:
return self.delivery_info[name]

raise AttributeError(name)

def _load_properties(self, raw_bytes):
"""Given the raw bytes containing the property-flags and property-list
from a content-frame-header, parse and insert into a dictionary
stored in this object as an attribute named 'properties'."""
r = AMQPReader(raw_bytes)

#
# Read 16-bit shorts until we get one with a low bit set to zero
#
flags = []
while 1:
flag_bits = r.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break

shift = 0
d = {}
for key, proptype in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
d[key] = getattr(r, 'read_' + proptype)()
shift -= 1

self.properties = d

def _serialize_properties(self):
"""serialize the 'properties' attribute (a dictionary) into
the raw bytes making up a set of property flags and a
property list, suitable for putting into a content frame header."""
shift = 15
flag_bits = 0
flags = []
raw_bytes = AMQPWriter()
for key, proptype in self.PROPERTIES:
val = self.properties.get(key, None)
if val is not None:
if shift == 0:
flags.append(flag_bits)
flag_bits = 0
shift = 15

flag_bits |= (1 << shift)
if proptype != 'bit':
getattr(raw_bytes, 'write_' + proptype)(val)

shift -= 1

flags.append(flag_bits)
result = AMQPWriter()
for flag_bits in flags:
result.write_short(flag_bits)
result.write(raw_bytes.getvalue())

return result.getvalue()
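
Since ``AMQPWriter`` and ``AMQPReader`` are symmetric, a field table can be round-tripped in isolation; a small sketch with made-up values:

    from datetime import datetime
    from decimal import Decimal

    from amqp.serialization import AMQPReader, AMQPWriter

    w = AMQPWriter()
    w.write_table({'retries': 3,
                   'price': Decimal('19.99'),
                   'tags': ['a', 'b'],
                   'seen': datetime(2018, 10, 21, 13, 47)})

    r = AMQPReader(w.getvalue())
    print(r.read_table())
    # {'retries': 3, 'price': Decimal('19.99'),
    #  'tags': ['a', 'b'], 'seen': datetime(2018, 10, 21, 13, 47)}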

+ 0
- 0
thesisenv/lib/python3.6/site-packages/amqp/tests/__init__.py


+ 85
- 0
thesisenv/lib/python3.6/site-packages/amqp/tests/case.py

@@ -0,0 +1,85 @@
from __future__ import absolute_import

import sys

from functools import wraps
from io import StringIO

import mock

from nose import SkipTest # noqa

try:
import unittest
unittest.skip
except AttributeError:
import unittest2 as unittest # noqa

PY3 = sys.version_info[0] == 3

patch = mock.patch
call = mock.call


class Case(unittest.TestCase):

def assertItemsEqual(self, a, b, *args, **kwargs):
return self.assertEqual(sorted(a), sorted(b), *args, **kwargs)
assertSameElements = assertItemsEqual


class Mock(mock.Mock):

def __init__(self, *args, **kwargs):
attrs = kwargs.pop('attrs', None) or {}
super(Mock, self).__init__(*args, **kwargs)
for attr_name, attr_value in attrs.items():
setattr(self, attr_name, attr_value)


class _ContextMock(Mock):
"""Dummy class implementing __enter__ and __exit__
as the with statement requires these to be implemented
in the class, not just the instance."""

def __enter__(self):
pass

def __exit__(self, *exc_info):
pass


def ContextMock(*args, **kwargs):
obj = _ContextMock(*args, **kwargs)
obj.attach_mock(Mock(), '__enter__')
obj.attach_mock(Mock(), '__exit__')
obj.__enter__.return_value = obj
# if __exit__ return a value the exception is ignored,
# so it must return None here.
obj.__exit__.return_value = None
return obj


class MockPool(object):

def __init__(self, value=None):
self.value = value or ContextMock()

def acquire(self, **kwargs):
return self.value


def redirect_stdouts(fun):

@wraps(fun)
def _inner(*args, **kwargs):
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
return fun(*args, **dict(kwargs,
stdout=sys.stdout, stderr=sys.stderr))
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__

return _inner
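
A minimal sketch of how these helpers combine in a test case (requires ``mock`` and ``nose``, as imported above; the test body is invented):

    from amqp.tests.case import Case, redirect_stdouts

    class test_example(Case):

        @redirect_stdouts
        def test_prints(self, stdout, stderr):
            print('captured')
            self.assertIn('captured', stdout.getvalue())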

+ 35
- 0
thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py

@@ -0,0 +1,35 @@
from __future__ import absolute_import

from collections import defaultdict

from amqp.channel import Channel
from amqp.exceptions import NotConfirmed
from amqp.serialization import AMQPWriter, AMQPReader

from amqp.tests.case import Case, Mock


class NoOpenChannel(Channel):

def _x_open(self):
pass


class test_Channel(Case):

def setUp(self):
self.args = AMQPWriter()
self.connection = Mock(name='connection')
self.connection.channels = defaultdict(lambda: None)
self.channel = NoOpenChannel(self.connection, channel_id=1)

def test_basic_nack(self, delivery_tag=3172312312):
self.args.write_longlong(delivery_tag)
self.args.write_bit(0)
self.args.write_bit(0)
with self.assertRaises(NotConfirmed):
self.channel._basic_nack(AMQPReader(self.args.getvalue()))
callback = Mock(name='callback')
self.channel.events['basic_nack'].add(callback)
self.channel._basic_nack(AMQPReader(self.args.getvalue()))
callback.assert_called_with(delivery_tag, False, False)

+ 299
- 0
thesisenv/lib/python3.6/site-packages/amqp/transport.py

@@ -0,0 +1,299 @@
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

import errno
import re
import socket
import ssl

# Jython does not have this attribute
try:
from socket import SOL_TCP
except ImportError: # pragma: no cover
from socket import IPPROTO_TCP as SOL_TCP # noqa

try:
from ssl import SSLError
except ImportError:
class SSLError(Exception): # noqa
pass

from struct import pack, unpack

from .exceptions import UnexpectedFrame
from .utils import get_errno, set_cloexec

_UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT

AMQP_PORT = 5672

EMPTY_BUFFER = bytes()

# Yes, Advanced Message Queuing Protocol Protocol is redundant
AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x00\x09'.encode('latin_1')

# Match things like: [fe80::1]:5432, from RFC 2732
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')


class _AbstractTransport(object):
"""Common superclass for TCP and SSL transports"""
connected = False

def __init__(self, host, connect_timeout):
self.connected = True
msg = None
port = AMQP_PORT

m = IPV6_LITERAL.match(host)
if m:
host = m.group(1)
if m.group(2):
port = int(m.group(2))
else:
if ':' in host:
host, port = host.rsplit(':', 1)
port = int(port)

self.sock = None
last_err = None
for res in socket.getaddrinfo(host, port, 0,
socket.SOCK_STREAM, SOL_TCP):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
try:
set_cloexec(self.sock, True)
except NotImplementedError:
pass
self.sock.settimeout(connect_timeout)
self.sock.connect(sa)
except socket.error as exc:
msg = exc
self.sock.close()
self.sock = None
last_err = msg
continue
break

if not self.sock:
            # Didn't connect, raise the most recent error message
raise socket.error(last_err)

try:
self.sock.settimeout(None)
self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

self._setup_transport()

self._write(AMQP_PROTOCOL_HEADER)
except (OSError, IOError, socket.error) as exc:
if get_errno(exc) not in _UNAVAIL:
self.connected = False
raise

def __del__(self):
try:
# socket module may have been collected by gc
# if this is called by a thread at shutdown.
if socket is not None:
try:
self.close()
except socket.error:
pass
finally:
self.sock = None

def _read(self, n, initial=False):
"""Read exactly n bytes from the peer"""
        raise NotImplementedError('Must be overridden in subclass')

def _setup_transport(self):
"""Do any additional initialization of the class (used
by the subclasses)."""
pass

def _shutdown_transport(self):
"""Do any preliminary work in shutting down the connection."""
pass

def _write(self, s):
"""Completely write a string to the peer."""
        raise NotImplementedError('Must be overridden in subclass')

def close(self):
if self.sock is not None:
self._shutdown_transport()
# Call shutdown first to make sure that pending messages
# reach the AMQP broker if the program exits after
# calling this method.
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.sock = None
self.connected = False

def read_frame(self, unpack=unpack):
read = self._read
read_frame_buffer = EMPTY_BUFFER
try:
frame_header = read(7, True)
read_frame_buffer += frame_header
frame_type, channel, size = unpack('>BHI', frame_header)
payload = read(size)
read_frame_buffer += payload
ch = ord(read(1))
except socket.timeout:
self._read_buffer = read_frame_buffer + self._read_buffer
raise
except (OSError, IOError, socket.error) as exc:
# Don't disconnect for ssl read time outs
# http://bugs.python.org/issue10272
if isinstance(exc, SSLError) and 'timed out' in str(exc):
raise socket.timeout()
if get_errno(exc) not in _UNAVAIL:
self.connected = False
raise
if ch == 206: # '\xce'
return frame_type, channel, payload
else:
raise UnexpectedFrame(
'Received 0x{0:02x} while expecting 0xce'.format(ch))

def write_frame(self, frame_type, channel, payload):
size = len(payload)
try:
self._write(pack(
'>BHI%dsB' % size,
frame_type, channel, size, payload, 0xce,
))
except socket.timeout:
raise
except (OSError, IOError, socket.error) as exc:
if get_errno(exc) not in _UNAVAIL:
self.connected = False
raise


class SSLTransport(_AbstractTransport):
"""Transport that works over SSL"""

def __init__(self, host, connect_timeout, ssl):
if isinstance(ssl, dict):
self.sslopts = ssl
self._read_buffer = EMPTY_BUFFER
super(SSLTransport, self).__init__(host, connect_timeout)

def _setup_transport(self):
"""Wrap the socket in an SSL object."""
if hasattr(self, 'sslopts'):
self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
else:
self.sock = ssl.wrap_socket(self.sock)
self.sock.do_handshake()
self._quick_recv = self.sock.read

def _shutdown_transport(self):
"""Unwrap a Python 2.6 SSL socket, so we can call shutdown()"""
if self.sock is not None:
try:
unwrap = self.sock.unwrap
except AttributeError:
return
self.sock = unwrap()

def _read(self, n, initial=False,
_errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR)):
# According to SSL_read(3), it can at most return 16kb of data.
# Thus, we use an internal read buffer like TCPTransport._read
# to get the exact number of bytes wanted.
recv = self._quick_recv
rbuf = self._read_buffer
try:
while len(rbuf) < n:
try:
s = recv(n - len(rbuf)) # see note above
except socket.error as exc:
# ssl.sock.read may cause ENOENT if the
# operation couldn't be performed (Issue celery#1414).
if not initial and exc.errno in _errnos:
continue
raise
if not s:
raise IOError('Socket closed')
rbuf += s
except:
self._read_buffer = rbuf
raise
result, self._read_buffer = rbuf[:n], rbuf[n:]
return result

def _write(self, s):
"""Write a string out to the SSL socket fully."""
try:
write = self.sock.write
except AttributeError:
# Works around a bug in python socket library
raise IOError('Socket closed')
else:
while s:
n = write(s)
if not n:
raise IOError('Socket closed')
s = s[n:]


class TCPTransport(_AbstractTransport):
"""Transport that deals directly with TCP socket."""

def _setup_transport(self):
"""Setup to _write() directly to the socket, and
do our own buffered reads."""
self._write = self.sock.sendall
self._read_buffer = EMPTY_BUFFER
self._quick_recv = self.sock.recv

def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)):
"""Read exactly n bytes from the socket"""
recv = self._quick_recv
rbuf = self._read_buffer
try:
while len(rbuf) < n:
try:
s = recv(n - len(rbuf))
except socket.error as exc:
if not initial and exc.errno in _errnos:
continue
raise
if not s:
raise IOError('Socket closed')
rbuf += s
except:
self._read_buffer = rbuf
raise

result, self._read_buffer = rbuf[:n], rbuf[n:]
return result


def create_transport(host, connect_timeout, ssl=False):
"""Given a few parameters from the Connection constructor,
select and create a subclass of _AbstractTransport."""
if ssl:
return SSLTransport(host, connect_timeout, ssl)
else:
return TCPTransport(host, connect_timeout)
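
A hedged end-to-end sketch: assuming a broker such as RabbitMQ is listening on localhost:5672, the factory above yields a connected transport whose first readable frame is the broker's Connection.start method (frame type 1 on channel 0):

    from amqp.transport import create_transport

    t = create_transport('localhost:5672', connect_timeout=5)   # assumes a local broker
    try:
        frame_type, channel, payload = t.read_frame()
        print(frame_type, channel)    # 1 0  -> a method frame on channel 0
    finally:
        t.close()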

+ 102
- 0
thesisenv/lib/python3.6/site-packages/amqp/utils.py

@@ -0,0 +1,102 @@
from __future__ import absolute_import

import sys

try:
import fcntl
except ImportError:
fcntl = None # noqa


class promise(object):
if not hasattr(sys, 'pypy_version_info'):
__slots__ = tuple(
'fun args kwargs value ready failed '
' on_success on_error calls'.split()
)

def __init__(self, fun, args=(), kwargs=(),
on_success=None, on_error=None):
self.fun = fun
self.args = args
self.kwargs = kwargs
self.ready = False
self.failed = False
self.on_success = on_success
self.on_error = on_error
self.value = None
self.calls = 0

def __repr__(self):
        return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})>'.format(
self,
)

def __call__(self, *args, **kwargs):
try:
self.value = self.fun(
*self.args + args if self.args else args,
**dict(self.kwargs, **kwargs) if self.kwargs else kwargs
)
except Exception as exc:
self.set_error_state(exc)
else:
if self.on_success:
self.on_success(self.value)
finally:
self.ready = True
self.calls += 1

def then(self, callback=None, on_error=None):
self.on_success = callback
self.on_error = on_error
return callback

def set_error_state(self, exc):
self.failed = True
if self.on_error is None:
raise
self.on_error(exc)

def throw(self, exc):
try:
raise exc
except exc.__class__ as with_cause:
self.set_error_state(with_cause)


def noop():
return promise(lambda *a, **k: None)


try:
from os import set_cloexec # Python 3.4?
except ImportError:
def set_cloexec(fd, cloexec): # noqa
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC
except AttributeError:
raise NotImplementedError(
'close-on-exec flag not supported on this platform',
)
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
flags |= FD_CLOEXEC
else:
flags &= ~FD_CLOEXEC
return fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def get_errno(exc):
""":exc:`socket.error` and :exc:`IOError` first got
the ``.errno`` attribute in Py2.7"""
try:
return exc.errno
except AttributeError:
try:
# e.args = (errno, reason)
if isinstance(exc.args, tuple) and len(exc.args) == 2:
return exc.args[0]
except AttributeError:
pass
return 0
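
The ``promise`` class above is a tiny callback holder rather than a full futures implementation; a sketch of the happy path (the functions are invented):

    from amqp.utils import promise

    results = []
    p = promise(lambda x: x.upper(), on_success=results.append)

    p('ok')
    assert p.ready and p.calls == 1
    assert p.value == 'OK'
    print(results)    # ['OK']  -- on_success received the return value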

+ 85
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO

@@ -0,0 +1,85 @@
Metadata-Version: 1.1
Name: anyjson
Version: 0.3.3
Summary: Wraps the best available JSON implementation available in a common interface
Home-page: http://bitbucket.org/runeh/anyjson/
Author: Rune Halvorsen
Author-email: runefh@gmail.com
License: BSD
Description: ##############################
anyjson - JSON library wrapper
##############################
Overview
--------
Anyjson loads whichever is the fastest JSON module installed and provides
a uniform API regardless of which JSON implementation is used.
Originally part of carrot (http://github.com/ask/carrot/)
Examples
--------
To serialize a python object to a JSON string, call the `serialize` function:
>>> import anyjson
>>> anyjson.serialize(["test", 1, {"foo": 3.141592}, "bar"])
'["test", 1, {"foo": 3.141592}, "bar"]'
Conversion the other way is done with the `deserialize` call.
>>> anyjson.deserialize("""["test", 1, {"foo": 3.141592}, "bar"]""")
['test', 1, {'foo': 3.1415920000000002}, 'bar']
Regardless of the JSON implementation used, the exceptions will be the same.
This means that trying to serialize something not compatible with JSON
raises a TypeError:
>>> anyjson.serialize([object()])
Traceback (most recent call last):
<snipped traceback>
TypeError: object is not JSON encodable
And deserializing a JSON string with invalid JSON raises a ValueError:
>>> anyjson.deserialize("""['missing square brace!""")
Traceback (most recent call last):
<snipped traceback>
ValueError: cannot parse JSON description
Contact
-------
The module is maintained by Rune F. Halvorsen <runefh@gmail.com>.
The project resides at http://bitbucket.org/runeh/anyjson . Bugs and feature
requests can be submitted there. Patches are also very welcome.
Changelog
---------
See CHANGELOG file
License
-------
see the LICENSE file
Keywords: json
Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.1
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: Python :: Implementation :: Jython

+ 15
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt

@@ -0,0 +1,15 @@
CHANGELOG
LICENSE
MANIFEST.in
README
setup.cfg
setup.py
anyjson/__init__.py
anyjson.egg-info/PKG-INFO
anyjson.egg-info/SOURCES.txt
anyjson.egg-info/dependency_links.txt
anyjson.egg-info/not-zip-safe
anyjson.egg-info/top_level.txt
tests/benchmark.py
tests/test_implementations.py
tests/test_implementations.pyc

+ 1
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt

@@ -0,0 +1 @@


+ 7
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt

@@ -0,0 +1,7 @@
../anyjson/__init__.py
../anyjson/__pycache__/__init__.cpython-36.pyc
PKG-INFO
SOURCES.txt
dependency_links.txt
not-zip-safe
top_level.txt

+ 1
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe

@@ -0,0 +1 @@


+ 1
- 0
thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt

@@ -0,0 +1 @@
anyjson

+ 142
- 0
thesisenv/lib/python3.6/site-packages/anyjson/__init__.py

@@ -0,0 +1,142 @@
"""Wraps the best available JSON implementation available in a common
interface"""

import sys

VERSION = (0, 3, 3)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Rune Halvorsen"
__contact__ = "runefh@gmail.com"
__homepage__ = "http://bitbucket.org/runeh/anyjson/"
__docformat__ = "restructuredtext"

# -eof meta-

#: The json implementation object. This is probably not useful to you,
#: except to get the name of the implementation in use. The name is
#: available through ``implementation.name``.
implementation = None

# json.loads does not support buffer() objects, so we read through
# load() and a StringIO instead, which avoids a copy.
if sys.version_info[0] == 3:
    from io import StringIO
else:
    try:
        from cStringIO import StringIO  # noqa
    except ImportError:
        from StringIO import StringIO  # noqa

#: List of known json modules, and the names of their loads/dumps
#: methods, as well as the exceptions they throw. Exception can be either
#: an exception class or a string.
_modules = [("yajl", "dumps", TypeError, "loads", ValueError, "load"),
("jsonlib2", "write", "WriteError", "read", "ReadError", None),
("jsonlib", "write", "WriteError", "read", "ReadError", None),
("simplejson", "dumps", TypeError, "loads", ValueError, "load"),
("json", "dumps", TypeError, "loads", ValueError, "load"),
("django.utils.simplejson", "dumps", TypeError, "loads", ValueError, "load"),
("cjson", "encode", "EncodeError", "decode", "DecodeError", None)
]

_fields = ("modname", "encoder", "encerror",
"decoder", "decerror", "filedecoder")


class _JsonImplementation(object):
"""Incapsulates a JSON implementation"""

def __init__(self, modspec):
modinfo = dict(list(zip(_fields, modspec)))

if modinfo["modname"] == "cjson":
import warnings
warnings.warn("cjson is deprecated! See http://pypi.python.org/pypi/python-cjson/1.0.5", DeprecationWarning)

# No try block. We want importerror to end up at caller
module = self._attempt_load(modinfo["modname"])

self.implementation = modinfo["modname"]
self._encode = getattr(module, modinfo["encoder"])
self._decode = getattr(module, modinfo["decoder"])
fdec = modinfo["filedecoder"]
self._filedecode = fdec and getattr(module, fdec)
self._encode_error = modinfo["encerror"]
self._decode_error = modinfo["decerror"]

if isinstance(modinfo["encerror"], str):
self._encode_error = getattr(module, modinfo["encerror"])
if isinstance(modinfo["decerror"], str):
self._decode_error = getattr(module, modinfo["decerror"])

self.name = modinfo["modname"]

def __repr__(self):
return "<_JsonImplementation instance using %s>" % self.name

def _attempt_load(self, modname):
"""Attempt to load module name modname, returning it on success,
throwing ImportError if module couldn't be imported"""
__import__(modname)
return sys.modules[modname]

def dumps(self, data):
"""Serialize the datastructure to json. Returns a string. Raises
TypeError if the object could not be serialized."""
try:
return self._encode(data)
except self._encode_error as exc:
            raise TypeError(*exc.args).with_traceback(sys.exc_info()[2])
serialize = dumps

def loads(self, s):
"""deserialize the string to python data types. Raises
ValueError if the string could not be parsed."""
# uses StringIO to support buffer objects.
try:
if self._filedecode and not isinstance(s, str):
return self._filedecode(StringIO(s))
return self._decode(s)
except self._decode_error as exc:
            raise ValueError(*exc.args).with_traceback(sys.exc_info()[2])
deserialize = loads


def force_implementation(modname):
"""Forces anyjson to use a specific json module if it's available"""
global implementation
for name, spec in [(e[0], e) for e in _modules]:
if name == modname:
implementation = _JsonImplementation(spec)
return
raise ImportError("No module named: %s" % modname)


if __name__ == "__main__":
# If run as a script, we do nothing but print an error message.
# We do NOT try to load a compatible module because that may throw an
# exception, which renders the package uninstallable with easy_install
    # (It tries to execfile the script when installing, to make sure it works)
print("Running anyjson as a stand alone script is not supported")
sys.exit(1)
else:
for modspec in _modules:
try:
implementation = _JsonImplementation(modspec)
break
except ImportError:
pass
else:
raise ImportError("No supported JSON module found")


def loads(value):
    """Deserialize a JSON string into a Python object."""
    return implementation.loads(value)
deserialize = loads  # compat


def dumps(value):
    """Serialize a Python object into a JSON string."""
    return implementation.dumps(value)
serialize = dumps
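
Beyond the module-level ``loads``/``dumps`` shown above, the selected backend can be inspected and pinned; a short sketch (the output depends on which JSON modules are installed):

    import anyjson

    print(anyjson.implementation.name)    # e.g. 'json' on a stock CPython
    s = anyjson.serialize({'tags': ['django', 'celery']})
    print(anyjson.deserialize(s))         # {'tags': ['django', 'celery']}

    anyjson.force_implementation('json')  # raises ImportError if unavailable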

+ 792
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO

@@ -0,0 +1,792 @@
Metadata-Version: 1.2
Name: billiard
Version: 3.3.0.23
Summary: Python multiprocessing fork with improvements and bugfixes
Home-page: http://github.com/celery/billiard
Author: R Oudkerk / Python Software Foundation
Author-email: python-dev@python.org
Maintainer: Ask Solem
Maintainer-email: ask@celeryproject.org
License: BSD
Description: ========
billiard
========
:version: 3.3.0.23
About
-----
`billiard` is a fork of the Python 2.7 `multiprocessing <http://docs.python.org/library/multiprocessing.html>`_
package. The multiprocessing package itself is a renamed and updated version of
R Oudkerk's `pyprocessing <http://pypi.python.org/pypi/processing/>`_ package.
This standalone variant is intended to be compatible with Python 2.4 and 2.5,
and will draw its fixes/improvements from python-trunk.
- This package would not be possible if not for the contributions of not only
the current maintainers but all of the contributors to the original pyprocessing
package listed `here <http://pyprocessing.berlios.de/doc/THANKS.html>`_
- Also it is a fork of the multiprocessing backport package by Christian Heimes.
- It includes the no-execv patch contributed by R. Oudkerk.
- And the Pool improvements previously located in `Celery`_.
.. _`Celery`: http://celeryproject.org
Bug reporting
-------------
Please report bugs related to multiprocessing at the
`Python bug tracker <http://bugs.python.org/>`_. Issues related to billiard
should be reported at http://github.com/celery/billiard/issues.
.. image:: https://d2weczhvl823v0.cloudfront.net/celery/billiard/trend.png
:alt: Bitdeli badge
:target: https://bitdeli.com/free
===========
Changes
===========
3.3.0.23 - 2016-03-03
---------------------
- ExceptionInfo: Adds tb_lasti and other missing traceback fields
(Issue #180).
- monotonic: Now makes sure ctypes is available.
- PipeConnection: Make sure the pipe is not closed multiple times.
3.3.0.22 - 2015-12-08
---------------------
- Wheel packages for Windows now available.
3.3.0.21 - 2015-10-26
---------------------
- Pool: Fixed semaphore error on Python3.
- Fixed libSystem error on OS X El Capitan.
3.3.0.20 - 2015-04-17
---------------------
- Pool: Timeouts will attempt to send SIGKILL, but this signal
does not exist on Windows. Replaced with SIGTERM.
3.3.0.19 - 2014-10-13
---------------------
- Pool: Exceptions in user timeout callbacks are now logged instead
of crashing the pool.
Contributed by Pierre Fersing.
- Pool: Exit codes in errors were improperly being represented as signals.
- Pool: ``.map``. and ``.imap`` now working again.
- Now builds on FreeBSD 10.
Contributed by Michael Fladischer.
3.3.0.18 - 2014-06-20
---------------------
- Now compiles on GNU/kFreeBSD
Contributed by Michael Fladischer.
- Pool: `AF_PIPE` address fixed so that it works on recent Windows versions
in combination with Python 2.7.7.
Fix contributed by Joshua Tacoma.
- Pool: Fix for `Supervisor object has no attribute _children` error.
Fix contributed by Andres Riancho.
- Pool: Fixed bug with human_status(None).
- Pool: shrink did not work properly if asked to remove more than 1 process.
3.3.0.17 - 2014-04-16
---------------------
- Fixes SemLock on Python 3.4 (Issue #107) when using
``forking_enable(False)``.
- Pool: Include more useful exitcode information when processes exit.
3.3.0.16 - 2014-02-11
---------------------
- Previous release was missing the billiard.py3 package from MANIFEST
so the installation would not work on Python 3.
3.3.0.15 - 2014-02-10
---------------------
- Pool: Fixed "cannot join process not started" error.
- Now uses billiard.py2 and billiard.py3 specific packages that are installed
depending on the python version used.
This way the installation will not import version specific modules (and
possibly crash).
3.3.0.14 - 2014-01-17
---------------------
- Fixed problem with our backwards compatible ``bytes`` wrapper
(Issue #103).
- No longer expects frozen applications to have a valid ``__file__``
attribute.
Fix contributed by George Sibble.
3.3.0.13 - 2013-12-13
---------------------
- Fixes compatibility with Python < 2.7.6
- No longer attempts to handle ``SIGBUS``
Contributed by Vishal Vatsa.
- Non-thread based pool now only handles signals:
``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``,
``SIGUSR2``.
- setup.py: Only show compilation warning for build related commands.
3.3.0.12 - 2013-12-09
---------------------
- Fixed installation for Python 3.
Contributed by Rickert Mulder.
- Pool: Fixed bug with maxtasksperchild.
Fix contributed by Ionel Cristian Maries.
- Pool: Fixed bug in maintain_pool.
3.3.0.11 - 2013-12-03
---------------------
- Fixed Unicode error when installing the distribution (Issue #89).
- Daemonic processes are now allowed to have children.
But note that it will not be possible to automatically
terminate them when the process exits.
See discussion at https://github.com/celery/celery/issues/1709
- Pool: Would not always be able to detect that a process exited.
3.3.0.10 - 2013-12-02
---------------------
- Windows: Fixed problem with missing ``WAITABANDONED_0``
Fix contributed by Matthias Wagner
- Windows: PipeConnection can now be inherited.
Fix contributed by Matthias Wagner
3.3.0.9 - 2013-12-02
--------------------
- Temporary workaround for Celery maxtasksperchild issue.
Fix contributed by Ionel Cristian Maries.
3.3.0.8 - 2013-11-21
--------------------
- Now also sets ``multiprocessing.current_process`` for compatibility
with logging's ``processName`` field.
3.3.0.7 - 2013-11-15
--------------------
- Fixed compatibility with PyPy 2.1 + 2.2.
- Fixed problem in pypy detection.
Fix contributed by Tin Tvrtkovic.
- Now uses ``ctypes.find_library`` instead of hardcoded path to find
the OS X CoreServices framework.
Fix contributed by Moritz Kassner.
3.3.0.6 - 2013-11-12
--------------------
- Now works without C extension again.
- New ``_billiard.read(fd, buffer, [len, ])`` function
implements os.read with buffer support (new buffer API).
- New pure-python implementation of ``Connection.send_offset``.
3.3.0.5 - 2013-11-11
--------------------
- All platforms except Windows/PyPy/Jython now require the C extension.
3.3.0.4 - 2013-11-11
--------------------
- Fixed problem with Python3 and setblocking.
3.3.0.3 - 2013-11-09
--------------------
- Now works on Windows again.
3.3.0.2 - 2013-11-08
--------------------
- ApplyResult.terminate() may be set to signify that the job
must not be executed. It can be used in combination with
Pool.terminate_job.
- Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments
to set the read or write end of the pipe to be nonblocking.
- Pool: The log message included exception info, but the exception happened
in another process, so the resulting traceback was wrong.
- Pool: Worker process can now prepare results before they are sent
back to the main process (using ``Worker.prepare_result``).
3.3.0.1 - 2013-11-04
--------------------
- Pool: New ``correlation_id`` argument to ``apply_async`` can be
used to set a related id for the ``ApplyResult`` object returned:
>>> r = pool.apply_async(target, args, kwargs, correlation_id='foo')
>>> r.correlation_id
'foo'
- Pool: New callback `on_process_exit` is called when a pool
process exits, with signature ``(pid, exitcode)``.
Contributed by Daniel M. Taub.
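For illustration, an editor's sketch of a callback matching the documented
signature (how the callback is registered is assumed here and may differ
between versions)::

    def on_process_exit(pid, exitcode):
        print('pool process %s exited with status %s' % (pid, exitcode))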
- Pool: Improved the too many restarts detection.
3.3.0.0 - 2013-10-14
--------------------
- Dual code base now runs on Python 2.6+ and Python 3.
- No longer compatible with Python 2.5
- Includes many changes from multiprocessing in 3.4.
- Now uses ``time.monotonic`` when available, also including
fallback implementations for Linux and OS X.
- No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE.
Contributed by Kevin Blackham
- ``Finalize`` and ``register_after_fork`` are now aliases to multiprocessing.
It's better to import these from multiprocessing directly now
so that there aren't multiple registries.
- New `billiard.queues._SimpleQueue` that does not use semaphores.
- Pool: Can now be extended to support using multiple IPC queues.
- Pool: Can now use async I/O to write to pool IPC queues.
- Pool: New ``Worker.on_loop_stop`` handler can be used to add actions
at pool worker process shutdown.
Note that, like all finalization handlers, there is no guarantee that
this will be executed.
Contributed by dmtaub.
2.7.3.30 - 2013-06-28
---------------------
- Fixed ImportError in billiard._ext.
2.7.3.29 - 2013-06-28
---------------------
- Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55)
Fix contributed by Krzysztof Jagiello.
- Process now releases logging locks after fork.
This previously happened in Pool, but that was too late,
as processes log while they bootstrap.
- Pool.terminate_job now ignores `No such process` errors.
- The billiard.Pool entry point did not support the new arguments
to billiard.pool.Pool.
- Connection inbound buffer size increased from 1 KB to 128 KB.
- C extension cleaned up by properly adding a namespace to symbols.
- _exit_function now works even if thread wakes up after gc collect.
2.7.3.28 - 2013-04-16
---------------------
- Pool: Fixed regression that disabled the deadlock
fix in 2.7.3.24
- Pool: RestartFreqExceeded could be raised prematurely.
- Process: Include pid in startup and process INFO logs.
2.7.3.27 - 2013-04-12
---------------------
- Manager now works again.
- Python 3 fixes for billiard.connection.
- Fixed invalid argument bug when running on Python 3.3
Fix contributed by Nathan Wan.
- Ignore OSError when setting up signal handlers.
2.7.3.26 - 2013-04-09
---------------------
- Pool: Child processes must ignore SIGINT.
2.7.3.25 - 2013-04-09
---------------------
- Pool: 2.7.3.24 broke support for subprocesses (Issue #48).
Signals that should be ignored were instead handled
by terminating.
2.7.3.24 - 2013-04-08
---------------------
- Pool: Make sure finally blocks are called when process exits
due to a signal.
This fixes a deadlock problem when the process is killed
while holding the shared semaphore. However, this solution
does not protect against the process being killed outright;
a more elaborate solution is required for that and will
hopefully land in a later version.
- Pool: Can now use GDB to debug pool child processes.
- Fixes Python 3 compatibility problems.
Contributed by Albertas Agejevas.
2.7.3.23 - 2013-03-22
---------------------
- Windows: Now catches SystemExit from setuptools while trying to build
the C extension (Issue #41).
2.7.3.22 - 2013-03-08
---------------------
- Pool: apply_async now supports a ``callbacks_propagate`` keyword
argument that can be a tuple of exceptions to propagate in callbacks.
(callback, errback, accept_callback, timeout_callback).
- Errors are no longer logged for OK and recycle exit codes.
Previously this caused a process recycled normally by maxtasksperchild
to log an error.
- Fixed Python 2.5 compatibility problem (Issue #33).
- FreeBSD: Compilation now disables semaphores if Python was built
without it (Issue #40).
Contributed by William Grzybowski
2.7.3.21 - 2013-02-11
---------------------
- Fixed typo EX_REUSE -> EX_RECYCLE
- Code now conforms to new pep8.py rules.
2.7.3.20 - 2013-02-08
---------------------
- Pool: Disable restart limit if maxR is not set.
- Pool: Now uses os.kill instead of signal.signal.
Contributed by Lukasz Langa
- Fixed name error in process.py
- Pool: ApplyResult.get now properly raises exceptions.
Fix contributed by xentac.
2.7.3.19 - 2012-11-30
---------------------
- Fixes problem at shutdown when gc has collected symbols.
- Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32).
- Fixes Python 3 compatibility issues
2.7.3.18 - 2012-11-05
---------------------
- Pool: Fix for check_timeouts if not set.
Fix contributed by Dmitry Sukhov
- Fixed pickle problem with Traceback.
Code.frame.__loader__ is now ignored as it may be set to
an unpickleable object.
- The Django old-layout warning was always showing.
2.7.3.17 - 2012-09-26
---------------------
- Fixes a typo.
2.7.3.16 - 2012-09-26
---------------------
- Windows: Fixes for SemLock._rebuild (Issue #24).
- Pool: A job terminated with terminate_job now raises
billiard.exceptions.Terminated.
2.7.3.15 - 2012-09-21
---------------------
- Windows: Fixes unpickling of SemLock when using fallback.
- Windows: Fixes installation when no C compiler is available.
2.7.3.14 - 2012-09-20
---------------------
- Installation now works again for Python 3.
2.7.3.13 - 2012-09-14
---------------------
- Merged with Python trunk (many authors, many fixes: see Python changelog in
trunk).
- Using execv now also works with older Django projects using setup_environ
(Issue #10).
- Billiard now installs with a warning if the C extension could not be built
because a compiler is missing or the build fails in some other way.
Having the C extension installed is strongly recommended when running
with force execv, but this change also makes installation easier.
- Pool: Hard timeouts now send KILL shortly after TERM so that C extensions
cannot block the signal.
Python signal handlers are called in the interpreter, so they cannot
be called while a C extension is blocking the interpreter from running.
- Now uses a timeout value for Thread.join that doesn't exceed the maximum
on some platforms.
- Fixed bug in the SemLock fallback used when C extensions not installed.
Fix contributed by Mher Movsisyan.
- Pool: Now sets a Process.index attribute for every process in the pool.
This number will always be between 0 and concurrency-1, and
can be used to e.g. create a logfile for each process in the pool
without creating a new logfile whenever a process is replaced (see the
sketch below).
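An editor's sketch (assumes a pool initializer running inside the worker
process, where ``current_process().index`` exposes this attribute)::

    import logging
    from billiard import Pool, current_process

    def init_worker():
        index = current_process().index   # stable slot: 0..concurrency-1
        logging.basicConfig(filename='worker-%d.log' % index)

    pool = Pool(4, initializer=init_worker)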
2.7.3.12 - 2012-08-05
---------------------
- Fixed Python 2.5 compatibility issue.
- New ``Pool.terminate_job(pid)`` to terminate a job without raising
WorkerLostError (see the sketch below).
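An editor's sketch (``pid`` would normally come from monitoring code, and
``some_task`` is a placeholder)::

    from billiard import Pool

    pool = Pool(processes=2)
    result = pool.apply_async(some_task)
    # later: end the job running in worker `pid` without
    # treating that worker as lost
    pool.terminate_job(pid)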
2.7.3.11 - 2012-08-01
---------------------
- Adds support for FreeBSD 7+
Fix contributed by koobs.
- Pool: New argument ``allow_restart`` is now required to enable
the pool process sentinel that is required to restart the pool.
It's disabled by default, which reduces the number of file
descriptors/semaphores required to run the pool.
- Pool: Now emits a warning if a worker process exited with an error code,
but not if the error code is 155, which is now returned when the worker
process was recycled (maxtasksperchild).
- Python 3 compatibility fixes.
- Python 2.5 compatibility fixes.
2.7.3.10 - 2012-06-26
---------------------
- The ``TimeLimitExceeded`` exception string representation
previously included only the seconds as a number; it now gives a more
human-friendly description.
- Fixed typo in ``LaxBoundedSemaphore.shrink``.
- Pool: ``ResultHandler.handle_event`` no longer requires
any arguments.
- setup.py bdist now works.
2.7.3.9 - 2012-06-03
--------------------
- The ``MP_MAIN_FILE`` environment variable is now set to
the path of the ``__main__`` module when execv is enabled.
- Pool: Errors occurring in the TaskHandler are now reported.
2.7.3.8 - 2012-06-01
--------------------
- Can now be installed on Py 3.2.
- Issue #12091: simplify ApplyResult and MapResult with threading.Event.
Patch by Charles-Francois Natali.
- Pool: Support running without TimeoutHandler thread.
- The with_*_thread arguments have also been replaced with
a single `threads=True` argument.
- Two new pool callbacks:
- ``on_timeout_set(job, soft, hard)``
Applied when a task is executed with a timeout.
- ``on_timeout_cancel(job)``
Applied when a timeout is cancelled (the job completed); see the sketch below.
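Both callbacks can be passed directly to the pool constructor; an
editor's sketch (handler names are illustrative)::

    from billiard import Pool

    def timeout_set(job, soft, hard):
        print('timeouts set for %r: soft=%r hard=%r' % (job, soft, hard))

    def timeout_cancel(job):
        print('timeout cancelled; %r completed in time' % (job, ))

    pool = Pool(2, on_timeout_set=timeout_set,
                on_timeout_cancel=timeout_cancel)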
2.7.3.7 - 2012-05-21
--------------------
- Fixes Python 2.5 support.
2.7.3.6 - 2012-05-21
--------------------
- Pool: Can now be used in an event loop, without starting the supporting
threads (TimeoutHandler still not supported).
To facilitate this, the pool has gained the following keyword arguments:
- ``with_task_thread``
- ``with_result_thread``
- ``with_supervisor_thread``
- ``on_process_up``
Callback called with Process instance as argument
whenever a new worker process is added.
Used to add new process fds to the eventloop::
    def on_process_up(proc):
        hub.add_reader(proc.sentinel, pool.maintain_pool)
- ``on_process_down``
Callback called with Process instance as argument
whenever a worker process is found dead.
Used to remove process fds from the eventloop::
    def on_process_down(proc):
        hub.remove(proc.sentinel)
- ``semaphore``
Sets the semaphore used to protect against adding new items to the
pool when no processes are available. The default is a threaded
semaphore, so this can be used to swap in an async one.
And the following attributes::
- ``readers``
A map of ``fd`` -> ``callback``, to be registered in an eventloop.
Currently this is only the result outqueue with a callback
that processes all currently incoming results.
And the following methods::
- ``did_start_ok``
To be called after starting the pool, and after setting up the
eventloop with the pool fds, to ensure that the worker processes
didn't exit immediately because of an error (internal/memory).
- ``maintain_pool``
Public version of ``_maintain_pool`` that handles max restarts.
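Tying the pieces together, an editor's sketch of the wiring (``hub`` is a
hypothetical event loop exposing ``add_reader``/``remove``; the single
``threads`` flag from a later release is used for brevity)::

    from billiard import Pool

    pool = Pool(
        2, threads=False,
        on_process_up=lambda proc: hub.add_reader(
            proc.sentinel, pool.maintain_pool),
        on_process_down=lambda proc: hub.remove(proc.sentinel),
    )
    for fd, callback in pool.readers.items():
        hub.add_reader(fd, callback)
    if not pool.did_start_ok():
        raise RuntimeError('worker processes exited during startup')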
- Pool: The too-frequent-restart protection now only counts a restart if the
process had a non-successful exit code.
This takes the maxtasksperchild option into account, allowing
processes to exit cleanly on their own.
- Pool: New options max_restart + max_restart_freq
This means that the supervisor can't restart processes
faster than max_restart times per max_restart_freq seconds
(like the Erlang supervisor maxR & maxT settings).
The pool is closed and joined if the max restart
frequency is exceeded, where previously it would keep restarting
at an unlimited rate, possibly crashing the system.
The current default is to stop if the pool exceeds
100 * process_count restarts in 1 second. This may change later.
Only processes with an unsuccessful exit code are counted,
which takes the ``maxtasksperchild`` setting and code that
voluntarily exits into account.
- Pool: The ``WorkerLostError`` message now includes the exit-code of the
process that disappeared.
2.7.3.5 - 2012-05-09
--------------------
- Now always cleans up after ``sys.exc_info()`` to avoid
cyclic references.
- ExceptionInfo without arguments now defaults to ``sys.exc_info``.
- Forking can now be disabled using the
``MULTIPROCESSING_FORKING_DISABLE`` environment variable.
The envvar is also set in child processes so that the behavior
is inherited after execv.
- The semaphore cleanup process started when execv is used
now sets a useful process name if the ``setproctitle``
module is installed.
- Sets the ``FORKED_BY_MULTIPROCESSING``
environment variable if forking is disabled.
2.7.3.4 - 2012-04-27
--------------------
- Added `billiard.ensure_multiprocessing()`
Raises NotImplementedError if the platform does not support
multiprocessing (e.g. Jython).
2.7.3.3 - 2012-04-23
--------------------
- PyPy now falls back to using its internal _multiprocessing module,
so everything works except for forking_enable(False) (which
silently degrades).
- Fixed Python 2.5 compatibility issues.
- Uses more ``with`` statements.
- Merged some of the changes from the Python 3 branch.
2.7.3.2 - 2012-04-20
--------------------
- Now installs on PyPy/Jython (but does not work).
2.7.3.1 - 2012-04-20
--------------------
- Python 2.5 support added.
2.7.3.0 - 2012-04-20
--------------------
- Updated from Python 2.7.3
- Python 2.4 support removed; now only supports 2.5, 2.6 and 2.7
(py3k support may be considered at some point).
- Pool improvements from Celery.
- no-execv patch added (http://bugs.python.org/issue8713)
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python
Classifier: Programming Language :: C
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: Jython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: License :: OSI Approved :: BSD License
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Distributed Computing

+ 71
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt

@@ -0,0 +1,71 @@
CHANGES.txt
INSTALL.txt
LICENSE.txt
MANIFEST.in
Makefile
README.rst
pip-delete-this-directory.txt
setup.cfg
setup.py
Doc/conf.py
Doc/glossary.rst
Doc/index.rst
Doc/includes/__init__.py
Doc/includes/mp_benchmarks.py
Doc/includes/mp_newtype.py
Doc/includes/mp_pool.py
Doc/includes/mp_synchronize.py
Doc/includes/mp_webserver.py
Doc/includes/mp_workers.py
Doc/library/multiprocessing.rst
Modules/_billiard/connection.h
Modules/_billiard/multiprocessing.c
Modules/_billiard/multiprocessing.h
Modules/_billiard/pipe_connection.c
Modules/_billiard/semaphore.c
Modules/_billiard/socket_connection.c
Modules/_billiard/win32_functions.c
billiard/__init__.py
billiard/_ext.py
billiard/_win.py
billiard/common.py
billiard/compat.py
billiard/connection.py
billiard/einfo.py
billiard/exceptions.py
billiard/five.py
billiard/forking.py
billiard/heap.py
billiard/managers.py
billiard/pool.py
billiard/process.py
billiard/queues.py
billiard/reduction.py
billiard/sharedctypes.py
billiard/synchronize.py
billiard/util.py
billiard.egg-info/PKG-INFO
billiard.egg-info/SOURCES.txt
billiard.egg-info/dependency_links.txt
billiard.egg-info/not-zip-safe
billiard.egg-info/top_level.txt
billiard/dummy/__init__.py
billiard/dummy/connection.py
billiard/py2/__init__.py
billiard/py2/connection.py
billiard/py2/reduction.py
billiard/py3/__init__.py
billiard/py3/connection.py
billiard/py3/reduction.py
billiard/tests/__init__.py
billiard/tests/compat.py
billiard/tests/test_common.py
billiard/tests/test_package.py
billiard/tests/utils.py
funtests/__init__.py
funtests/setup.py
funtests/tests/__init__.py
funtests/tests/test_multiprocessing.py
requirements/test-ci.txt
requirements/test.txt
requirements/test3.txt

+ 1
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt

@@ -0,0 +1 @@


+ 67
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt

@@ -0,0 +1,67 @@
../billiard/__init__.py
../billiard/__pycache__/__init__.cpython-36.pyc
../billiard/__pycache__/_ext.cpython-36.pyc
../billiard/__pycache__/_win.cpython-36.pyc
../billiard/__pycache__/common.cpython-36.pyc
../billiard/__pycache__/compat.cpython-36.pyc
../billiard/__pycache__/connection.cpython-36.pyc
../billiard/__pycache__/einfo.cpython-36.pyc
../billiard/__pycache__/exceptions.cpython-36.pyc
../billiard/__pycache__/five.cpython-36.pyc
../billiard/__pycache__/forking.cpython-36.pyc
../billiard/__pycache__/heap.cpython-36.pyc
../billiard/__pycache__/managers.cpython-36.pyc
../billiard/__pycache__/pool.cpython-36.pyc
../billiard/__pycache__/process.cpython-36.pyc
../billiard/__pycache__/queues.cpython-36.pyc
../billiard/__pycache__/reduction.cpython-36.pyc
../billiard/__pycache__/sharedctypes.cpython-36.pyc
../billiard/__pycache__/synchronize.cpython-36.pyc
../billiard/__pycache__/util.cpython-36.pyc
../billiard/_ext.py
../billiard/_win.py
../billiard/common.py
../billiard/compat.py
../billiard/connection.py
../billiard/dummy/__init__.py
../billiard/dummy/__pycache__/__init__.cpython-36.pyc
../billiard/dummy/__pycache__/connection.cpython-36.pyc
../billiard/dummy/connection.py
../billiard/einfo.py
../billiard/exceptions.py
../billiard/five.py
../billiard/forking.py
../billiard/heap.py
../billiard/managers.py
../billiard/pool.py
../billiard/process.py
../billiard/py3/__init__.py
../billiard/py3/__pycache__/__init__.cpython-36.pyc
../billiard/py3/__pycache__/connection.cpython-36.pyc
../billiard/py3/__pycache__/reduction.cpython-36.pyc
../billiard/py3/connection.py
../billiard/py3/reduction.py
../billiard/queues.py
../billiard/reduction.py
../billiard/sharedctypes.py
../billiard/synchronize.py
../billiard/tests/__init__.py
../billiard/tests/__pycache__/__init__.cpython-36.pyc
../billiard/tests/__pycache__/compat.cpython-36.pyc
../billiard/tests/__pycache__/test_common.cpython-36.pyc
../billiard/tests/__pycache__/test_package.cpython-36.pyc
../billiard/tests/__pycache__/utils.cpython-36.pyc
../billiard/tests/compat.py
../billiard/tests/test_common.py
../billiard/tests/test_package.py
../billiard/tests/utils.py
../billiard/util.py
../funtests/__init__.py
../funtests/__pycache__/__init__.cpython-36.pyc
../funtests/__pycache__/setup.cpython-36.pyc
../funtests/setup.py
PKG-INFO
SOURCES.txt
dependency_links.txt
not-zip-safe
top_level.txt

+ 1
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe

@@ -0,0 +1 @@


+ 2
- 0
thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt

@@ -0,0 +1,2 @@
billiard
funtests

+ 323
- 0
thesisenv/lib/python3.6/site-packages/billiard/__init__.py

@@ -0,0 +1,323 @@
"""Python multiprocessing fork with improvements and bugfixes"""
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

from __future__ import absolute_import

import os
import sys
import warnings

from .exceptions import ( # noqa
ProcessError,
BufferTooShort,
TimeoutError,
AuthenticationError,
TimeLimitExceeded,
SoftTimeLimitExceeded,
WorkerLostError,
)
from .process import Process, current_process, active_children
from .util import SUBDEBUG, SUBWARNING

VERSION = (3, 3, 0, 23)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
__maintainer__ = 'Ask Solem'
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/celery/billiard"
__docformat__ = "restructuredtext"

# -eof meta-

__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable',
'forking_enable', 'forking_is_enabled'
]


def ensure_multiprocessing():
from ._ext import ensure_multiprocessing
return ensure_multiprocessing()


W_NO_EXECV = """\
force_execv is not supported as the billiard C extension \
is not installed\
"""

#
# Definitions not depending on native semaphores
#


def Manager():
'''
Returns a manager associated with a running server process

The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from .managers import SyncManager
m = SyncManager()
m.start()
return m


def Pipe(duplex=True, rnonblock=False, wnonblock=False):
'''
Returns two connection objects connected by a pipe
'''
from billiard.connection import Pipe
return Pipe(duplex, rnonblock, wnonblock)


def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0

if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')


def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from .forking import freeze_support
freeze_support()


def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from .util import get_logger
return get_logger()


def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from .util import log_to_stderr
return log_to_stderr(level)


def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from . import reduction # noqa

#
# Definitions depending on native semaphores
#


def Lock():
'''
Returns a non-recursive lock object
'''
from .synchronize import Lock
return Lock()


def RLock():
'''
Returns a recursive lock object
'''
from .synchronize import RLock
return RLock()


def Condition(lock=None):
'''
Returns a condition object
'''
from .synchronize import Condition
return Condition(lock)


def Semaphore(value=1):
'''
Returns a semaphore object
'''
from .synchronize import Semaphore
return Semaphore(value)


def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from .synchronize import BoundedSemaphore
return BoundedSemaphore(value)


def Event():
'''
Returns an event object
'''
from .synchronize import Event
return Event()


def Queue(maxsize=0):
'''
Returns a queue object
'''
from .queues import Queue
return Queue(maxsize)


def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from .queues import JoinableQueue
return JoinableQueue(maxsize)


def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None,
timeout=None, soft_timeout=None, lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1, on_process_up=None,
on_process_down=None, on_timeout_set=None, on_timeout_cancel=None,
threads=True, semaphore=None, putlocks=False, allow_restart=False):
'''
Returns a process pool object
'''
from .pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild,
timeout, soft_timeout, lost_worker_timeout,
max_restarts, max_restart_freq, on_process_up,
on_process_down, on_timeout_set, on_timeout_cancel,
threads, semaphore, putlocks, allow_restart)


def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from .sharedctypes import RawValue
return RawValue(typecode_or_type, *args)


def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from .sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)


def Value(typecode_or_type, *args, **kwds):
'''
Returns a synchronized shared object
'''
from .sharedctypes import Value
return Value(typecode_or_type, *args, **kwds)


def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Returns a synchronized shared array
'''
from .sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, **kwds)

#
#
#


def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from .forking import set_executable
set_executable(executable)


def forking_is_enabled():
'''
Returns a boolean value indicating whether billiard is
currently set to create child processes by forking the current
python process rather than by starting new instances of python.

On Windows this always returns `False`. On Unix it returns `True` by
default.
'''
from . import forking
return forking._forking_is_enabled


def forking_enable(value):
'''
Enable/disable creation of child process by forking the current process.

`value` should be a boolean value. If `value` is true then
forking is enabled. If `value` is false then forking is disabled.
On systems with `os.fork()` forking is enabled by default, and on
other systems it is always disabled.
'''
if not value:
from ._ext import supports_exec
if supports_exec:
from . import forking
if value and not hasattr(os, 'fork'):
raise ValueError('os.fork() not found')
forking._forking_is_enabled = bool(value)
if not value:
os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
else:
warnings.warn(RuntimeWarning(W_NO_EXECV))
if os.environ.get("MULTIPROCESSING_FORKING_DISABLE"):
forking_enable(False)
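# Editor's sketch (illustrative only, not part of the original module):
# the helpers above mirror the stdlib multiprocessing API.
if __name__ == '__main__':  # pragma: no cover - example usage
    pool = Pool(processes=2)
    print(pool.map(abs, [-4, 2, -1]))  # -> [4, 2, 1]
    pool.close()
    pool.join()
    print(cpu_count())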

+ 40
- 0
thesisenv/lib/python3.6/site-packages/billiard/_ext.py

@@ -0,0 +1,40 @@
from __future__ import absolute_import

import sys

supports_exec = True

from .compat import _winapi as win32 # noqa

if sys.platform.startswith("java"):
_billiard = None
else:
try:
import _billiard # noqa
except ImportError:
import _multiprocessing as _billiard # noqa
supports_exec = False
try:
Connection = _billiard.Connection
except AttributeError: # Py3
from billiard.connection import Connection # noqa

PipeConnection = getattr(_billiard, "PipeConnection", None)


def ensure_multiprocessing():
if _billiard is None:
raise NotImplementedError("multiprocessing not supported")


def ensure_SemLock():
try:
from _billiard import SemLock # noqa
except ImportError:
try:
from _multiprocessing import SemLock # noqa
except ImportError:
raise ImportError("""\
This platform lacks a functioning sem_open implementation, therefore,
the required synchronization primitives needed will not function,
see issue 3770.""")
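# Editor's sketch (illustrative only, not part of the original module)::
#
#     ensure_multiprocessing()  # no-op when _billiard/_multiprocessing loaded
#     ensure_SemLock()          # raises ImportError where sem_open is missing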

+ 116
- 0
thesisenv/lib/python3.6/site-packages/billiard/_win.py

@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
"""
billiard._win
~~~~~~~~~~~~~

Windows utilities to terminate process groups.

"""
from __future__ import absolute_import

import os

# psutil is painfully slow in win32. So to avoid adding big
# dependencies like pywin32 a ctypes based solution is preferred

# Code based on the winappdbg project http://winappdbg.sourceforge.net/
# (BSD License)
from ctypes import (
byref, sizeof, windll,
Structure, WinError, POINTER,
c_size_t, c_char, c_void_p,
)
from ctypes.wintypes import DWORD, LONG

ERROR_NO_MORE_FILES = 18
INVALID_HANDLE_VALUE = c_void_p(-1).value


class PROCESSENTRY32(Structure):
_fields_ = [
('dwSize', DWORD),
('cntUsage', DWORD),
('th32ProcessID', DWORD),
('th32DefaultHeapID', c_size_t),
('th32ModuleID', DWORD),
('cntThreads', DWORD),
('th32ParentProcessID', DWORD),
('pcPriClassBase', LONG),
('dwFlags', DWORD),
('szExeFile', c_char * 260),
]
LPPROCESSENTRY32 = POINTER(PROCESSENTRY32)


def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0):
hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags,
th32ProcessID)
if hSnapshot == INVALID_HANDLE_VALUE:
raise WinError()
return hSnapshot


def Process32First(hSnapshot, pe=None):
return _Process32n(windll.kernel32.Process32First, hSnapshot, pe)


def Process32Next(hSnapshot, pe=None):
return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe)


def _Process32n(fun, hSnapshot, pe=None):
if pe is None:
pe = PROCESSENTRY32()
pe.dwSize = sizeof(PROCESSENTRY32)
success = fun(hSnapshot, byref(pe))
if not success:
if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES:
return
raise WinError()
return pe


def get_all_processes_pids():
"""Return a dictionary with all processes pids as keys and their
parents as value. Ignore processes with no parents.
"""
h = CreateToolhelp32Snapshot()
parents = {}
pe = Process32First(h)
while pe:
if pe.th32ParentProcessID:
parents[pe.th32ProcessID] = pe.th32ParentProcessID
pe = Process32Next(h, pe)

return parents


def get_processtree_pids(pid, include_parent=True):
"""Return a list with all the pids of a process tree"""
parents = get_all_processes_pids()
all_pids = list(parents.keys())
pids = set([pid])
while 1:
pids_new = pids.copy()

for _pid in all_pids:
if parents[_pid] in pids:
pids_new.add(_pid)

if pids_new == pids:
break

pids = pids_new.copy()

if not include_parent:
pids.remove(pid)

return list(pids)


def kill_processtree(pid, signum):
"""Kill a process and all its descendants"""
family_pids = get_processtree_pids(pid)

for _pid in family_pids:
os.kill(_pid, signum)
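# Editor's sketch (illustrative only; Windows-specific, needs ctypes/windll):
if __name__ == '__main__':  # pragma: no cover - example usage
    tree = get_processtree_pids(os.getpid())  # this process + descendants
    print(tree)
    # kill_processtree(pid, signal.SIGTERM) would signal the whole tree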

+ 134
- 0
thesisenv/lib/python3.6/site-packages/billiard/common.py

@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-
"""
This module contains utilities added by billiard, to keep
"non-core" functionality out of ``.util``."""
from __future__ import absolute_import

import os
import signal
import sys

import pickle as pypickle
try:
import cPickle as cpickle
except ImportError: # pragma: no cover
cpickle = None # noqa

from .exceptions import RestartFreqExceeded
from .five import monotonic

if sys.version_info < (2, 6): # pragma: no cover
# cPickle does not use absolute_imports
pickle = pypickle
pickle_load = pypickle.load
pickle_loads = pypickle.loads
else:
pickle = cpickle or pypickle
pickle_load = pickle.load
pickle_loads = pickle.loads

# cPickle.loads does not support buffer() objects,
# but we can just create a StringIO and use load.
if sys.version_info[0] == 3:
from io import BytesIO
else:
try:
from cStringIO import StringIO as BytesIO # noqa
except ImportError:
from StringIO import StringIO as BytesIO # noqa

EX_SOFTWARE = 70

TERMSIGS_DEFAULT = (
'SIGHUP',
'SIGQUIT',
'SIGTERM',
'SIGUSR1',
'SIGUSR2'
)

TERMSIGS_FULL = (
'SIGHUP',
'SIGQUIT',
'SIGTRAP',
'SIGABRT',
'SIGEMT',
'SIGSYS',
'SIGPIPE',
'SIGALRM',
'SIGTERM',
'SIGXCPU',
'SIGXFSZ',
'SIGVTALRM',
'SIGPROF',
'SIGUSR1',
'SIGUSR2',
)

#: set by signal handlers just before calling exit.
#: if this is true after the sighandler returns it means that something
#: went wrong while terminating the process, and :func:`os._exit`
#: must be called ASAP.
_should_have_exited = [False]


def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))


def maybe_setsignal(signum, handler):
try:
signal.signal(signum, handler)
except (OSError, AttributeError, ValueError, RuntimeError):
pass


def _shutdown_cleanup(signum, frame):
# we will exit here so if the signal is received a second time
# we can be sure that something is very wrong and we may be in
# a crashing loop.
if _should_have_exited[0]:
os._exit(EX_SOFTWARE)
maybe_setsignal(signum, signal.SIG_DFL)
_should_have_exited[0] = True
sys.exit(-(256 - signum))


def reset_signals(handler=_shutdown_cleanup, full=False):
for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT:
try:
signum = getattr(signal, sig)
except AttributeError:
pass
else:
current = signal.getsignal(signum)
if current is not None and current != signal.SIG_IGN:
maybe_setsignal(signum, handler)


class restart_state(object):
RestartFreqExceeded = RestartFreqExceeded

def __init__(self, maxR, maxT):
self.maxR, self.maxT = maxR, maxT
self.R, self.T = 0, None

def step(self, now=None):
now = monotonic() if now is None else now
R = self.R
if self.T and now - self.T >= self.maxT:
# maxT passed, reset counter and time passed.
self.T, self.R = now, 0
elif self.maxR and self.R >= self.maxR:
# verify that R has a value as the result handler
# resets this when a job is accepted. If a job is accepted
# the startup probably went fine (startup restart burst
# protection)
if self.R: # pragma: no cover
self.R = 0 # reset in case someone catches the error
raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT))
# first run sets T
if self.T is None:
self.T = now
self.R += 1
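# Editor's sketch (illustrative only): with maxR=3 restarts per maxT=60s
# window, the fourth step() inside the window raises RestartFreqExceeded.
if __name__ == '__main__':  # pragma: no cover - example usage
    state = restart_state(maxR=3, maxT=60.0)
    for _ in range(3):
        state.step()
    try:
        state.step()
    except RestartFreqExceeded:
        print('restarting too fast')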

+ 107
- 0
thesisenv/lib/python3.6/site-packages/billiard/compat.py

@@ -0,0 +1,107 @@
from __future__ import absolute_import

import errno
import os
import sys

from .five import range

if sys.platform == 'win32':
try:
import _winapi # noqa
except ImportError: # pragma: no cover
try:
from _billiard import win32 as _winapi # noqa
except (ImportError, AttributeError):
from _multiprocessing import win32 as _winapi # noqa
else:
_winapi = None # noqa


if sys.version_info > (2, 7, 5):
buf_t, is_new_buffer = memoryview, True # noqa
else:
buf_t, is_new_buffer = buffer, False # noqa

if hasattr(os, 'write'):
__write__ = os.write

if is_new_buffer:

def send_offset(fd, buf, offset):
return __write__(fd, buf[offset:])

else: # Py<2.7.6

def send_offset(fd, buf, offset): # noqa
return __write__(fd, buf_t(buf, offset))

else: # non-posix platform

def send_offset(fd, buf, offset): # noqa
raise NotImplementedError('send_offset')


if sys.version_info[0] == 3:
bytes = bytes
else:
_bytes = bytes

# the 'bytes' alias in Python2 does not support an encoding argument.

class bytes(_bytes): # noqa

def __new__(cls, *args):
if len(args) > 1:
return _bytes(args[0]).encode(*args[1:])
return _bytes(*args)

try:
closerange = os.closerange
except AttributeError:

def closerange(fd_low, fd_high): # noqa
for fd in reversed(range(fd_low, fd_high)):
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise


def get_errno(exc):
""":exc:`socket.error` and :exc:`IOError` first got
the ``.errno`` attribute in Py2.7"""
try:
return exc.errno
except AttributeError:
try:
# e.args = (errno, reason)
if isinstance(exc.args, tuple) and len(exc.args) == 2:
return exc.args[0]
except AttributeError:
pass
return 0


if sys.platform == 'win32':

def setblocking(handle, blocking):
raise NotImplementedError('setblocking not implemented on win32')

def isblocking(handle):
raise NotImplementedError('isblocking not implemented on win32')

else:
from os import O_NONBLOCK
from fcntl import fcntl, F_GETFL, F_SETFL

def isblocking(handle): # noqa
return not (fcntl(handle, F_GETFL) & O_NONBLOCK)

def setblocking(handle, blocking): # noqa
flags = fcntl(handle, F_GETFL, 0)
fcntl(
handle, F_SETFL,
flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK,
)
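# Editor's sketch (illustrative only; exercises the POSIX branch above):
if __name__ == '__main__':  # pragma: no cover - example usage
    r, w = os.pipe()
    assert isblocking(r)        # pipes start out blocking
    setblocking(r, False)
    assert not isblocking(r)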

+ 27
- 0
thesisenv/lib/python3.6/site-packages/billiard/connection.py

@@ -0,0 +1,27 @@
from __future__ import absolute_import

import sys

is_pypy = hasattr(sys, 'pypy_version_info')

if sys.version_info[0] == 3:
from .py3 import connection
else:
from .py2 import connection # noqa


if is_pypy:
import _multiprocessing
from .compat import setblocking, send_offset

class Connection(_multiprocessing.Connection):

def send_offset(self, buf, offset):
return send_offset(self.fileno(), buf, offset)

def setblocking(self, blocking):
setblocking(self.fileno(), blocking)
_multiprocessing.Connection = Connection


sys.modules[__name__] = connection
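# Editor's sketch (illustrative only): after the sys.modules swap above,
# ``billiard.connection`` exposes the version-specific module directly::
#
#     from billiard.connection import Pipe
#     parent, child = Pipe()
#     parent.send({'answer': 42})
#     assert child.recv() == {'answer': 42}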

+ 165
- 0
thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py

@@ -0,0 +1,165 @@
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from __future__ import absolute_import

#
# Imports
#

import threading
import sys
import weakref
import array

from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event

from billiard.five import Queue

from billiard.connection import Pipe

__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]


class DummyProcess(threading.Thread):

def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()

def start(self):
assert self._parent is current_process()
self._start_called = True
self._parent._children[self] = None
threading.Thread.start(self)

@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None


try:
_Condition = threading._Condition
except AttributeError: # Py3
_Condition = threading.Condition # noqa


class Condition(_Condition):
if sys.version_info[0] == 3:
notify_all = _Condition.notifyAll
else:
notify_all = _Condition.notifyAll.__func__


Process = DummyProcess
current_process = threading.currentThread
current_process()._children = weakref.WeakKeyDictionary()


def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)


def freeze_support():
pass


class Namespace(object):

def __init__(self, **kwds):
self.__dict__.update(kwds)

def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)


dict = dict
list = list


def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)


class Value(object):

def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value

def _get(self):
return self._value

def _set(self, value):
self._value = value
value = property(_get, _set)

def __repr__(self):
return '<%r(%r, %r)>' % (type(self).__name__,
self._typecode, self._value)


def Manager():
return sys.modules[__name__]


def shutdown():
pass


def Pool(processes=None, initializer=None, initargs=()):
from billiard.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)

JoinableQueue = Queue
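# Editor's sketch (illustrative only): the dummy package runs "processes"
# as threads while keeping the multiprocessing-style API.
if __name__ == '__main__':  # pragma: no cover - example usage
    p = Process(target=print, args=('hello from a thread',))
    p.start()
    p.join()
    assert p.exitcode == 0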

+ 93
- 0
thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py

@@ -0,0 +1,93 @@
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from __future__ import absolute_import

from billiard.five import Queue

__all__ = ['Client', 'Listener', 'Pipe']

families = [None]


class Listener(object):

def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)

def accept(self):
return Connection(*self._backlog_queue.get())

def close(self):
self._backlog_queue = None

address = property(lambda self: self._backlog_queue)

def __enter__(self):
return self

def __exit__(self, *exc_info):
self.close()


def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)


def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)


class Connection(object):

def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get

def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0

def close(self):
pass
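# Editor's sketch (illustrative only): these queue-backed connections pair
# up just like real pipes::
#
#     a, b = Pipe()
#     a.send('ping')
#     assert b.recv() == 'ping'
#     assert not a.poll(timeout=0.0)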

+ 134
- 0
thesisenv/lib/python3.6/site-packages/billiard/einfo.py

@@ -0,0 +1,134 @@
from __future__ import absolute_import

import sys
import traceback

__all__ = ['ExceptionInfo', 'Traceback']

DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8


class _Code(object):

def __init__(self, code):
self.co_filename = code.co_filename
self.co_name = code.co_name
self.co_argcount = code.co_argcount
self.co_cellvars = ()
self.co_firstlineno = code.co_firstlineno
self.co_flags = code.co_flags
self.co_freevars = ()
self.co_code = b''
self.co_lnotab = b''
self.co_names = code.co_names
self.co_nlocals = code.co_nlocals
self.co_stacksize = code.co_stacksize
self.co_varnames = ()


class _Frame(object):
Code = _Code

def __init__(self, frame):
self.f_builtins = {}
self.f_globals = {
"__file__": frame.f_globals.get("__file__", "__main__"),
"__name__": frame.f_globals.get("__name__"),
"__loader__": None,
}
self.f_locals = fl = {}
try:
fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"]
except KeyError:
pass
self.f_trace = None
self.f_exc_traceback = None
self.f_exc_type = None
self.f_exc_value = None
self.f_code = self.Code(frame.f_code)
self.f_lineno = frame.f_lineno
self.f_lasti = frame.f_lasti
# don't want to hit https://bugs.python.org/issue21967
self.f_restricted = False


class _Object(object):

def __init__(self, **kw):
[setattr(self, k, v) for k, v in kw.items()]


class _Truncated(object):

def __init__(self):
self.tb_lineno = -1
self.tb_frame = _Object(
f_globals={"__file__": "",
"__name__": "",
"__loader__": None},
f_fileno=None,
f_code=_Object(co_filename="...",
co_name="[rest of traceback truncated]"),
)
self.tb_next = None
self.tb_lasti = 0


class Traceback(object):
Frame = _Frame

def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0):
self.tb_frame = self.Frame(tb.tb_frame)
self.tb_lineno = tb.tb_lineno
self.tb_lasti = tb.tb_lasti
self.tb_next = None
if tb.tb_next is not None:
if depth <= max_frames:
self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1)
else:
self.tb_next = _Truncated()


class ExceptionInfo(object):
"""Exception wrapping an exception and its traceback.

:param exc_info: The exception info tuple as returned by
:func:`sys.exc_info`.

"""

#: Exception type.
type = None

#: Exception instance.
exception = None

#: Pickleable traceback instance for use with :mod:`traceback`
tb = None

#: String representation of the traceback.
traceback = None

#: Set to true if this is an internal error.
internal = False

def __init__(self, exc_info=None, internal=False):
self.type, self.exception, tb = exc_info or sys.exc_info()
try:
self.tb = Traceback(tb)
self.traceback = ''.join(
traceback.format_exception(self.type, self.exception, tb),
)
self.internal = internal
finally:
del(tb)

def __str__(self):
return self.traceback

def __repr__(self):
return "<ExceptionInfo: %r>" % (self.exception, )

@property
def exc_info(self):
return self.type, self.exception, self.tb
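# Editor's sketch (illustrative only): capturing a pickleable traceback.
if __name__ == '__main__':  # pragma: no cover - example usage
    import pickle
    try:
        1 / 0
    except ZeroDivisionError:
        einfo = ExceptionInfo()   # defaults to sys.exc_info()
    print(einfo)                  # the formatted traceback text
    pickle.dumps(einfo.tb)        # unlike a real traceback, this pickles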

+ 54
- 0
thesisenv/lib/python3.6/site-packages/billiard/exceptions.py

@@ -0,0 +1,54 @@
from __future__ import absolute_import

try:
from multiprocessing import (
ProcessError,
BufferTooShort,
TimeoutError,
AuthenticationError,
)
except ImportError:
class ProcessError(Exception): # noqa
pass

class BufferTooShort(Exception): # noqa
pass

class TimeoutError(Exception): # noqa
pass

class AuthenticationError(Exception): # noqa
pass


class TimeLimitExceeded(Exception):
"""The time limit has been exceeded and the job has been terminated."""

def __str__(self):
return "TimeLimitExceeded%s" % (self.args, )


class SoftTimeLimitExceeded(Exception):
"""The soft time limit has been exceeded. This exception is raised
to give the task a chance to clean up."""

def __str__(self):
return "SoftTimeLimitExceeded%s" % (self.args, )


class WorkerLostError(Exception):
"""The worker processing a job has exited prematurely."""


class Terminated(Exception):
"""The worker processing a job has been terminated by user request."""


class RestartFreqExceeded(Exception):
"""Restarts too fast."""


class CoroStop(Exception):
"""Coroutine exit, as opposed to StopIteration which may
mean it should be restarted."""
pass
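# Editor's sketch (illustrative only): the time-limit exceptions render
# their args into the message::
#
#     >>> str(TimeLimitExceeded(30.0))
#     'TimeLimitExceeded(30.0,)'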

+ 192
- 0
thesisenv/lib/python3.6/site-packages/billiard/five.py

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~

Compatibility implementations of features
only available in newer Python versions.


"""
from __future__ import absolute_import

# ############# py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3

try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa

try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa

try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa

# ############# time.monotonic ###############################################

if sys.version_info < (3, 3):

import platform
SYSTEM = platform.system()

try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa

if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]

def _monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9

elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import ctypes
import os

CLOCK_MONOTONIC = 1 # see <linux/time.h>

class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]

librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]

def _monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else:
from time import time as _monotonic

try:
from time import monotonic
except ImportError:
monotonic = _monotonic # noqa

if PY3:
import builtins

from queue import Queue, Empty, Full
from itertools import zip_longest
from io import StringIO, BytesIO

map = map
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int, )

open_fqdn = 'builtins.open'

def items(d):
return d.items()

def keys(d):
return d.keys()

def values(d):
return d.values()

def nextfun(it):
return it.__next__

exec_ = getattr(builtins, 'exec')

def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value

class WhateverIO(StringIO):

def write(self, data):
if isinstance(data, bytes):
data = data.encode()
StringIO.write(self, data)

else:
import __builtin__ as builtins # noqa
from Queue import Queue, Empty, Full # noqa
from itertools import imap as map, izip_longest as zip_longest # noqa
from StringIO import StringIO # noqa
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode
long_t = long # noqa
range = xrange
int_types = (int, long)

open_fqdn = '__builtin__.open'

def items(d): # noqa
return d.iteritems()

def keys(d): # noqa
return d.iterkeys()

def values(d): # noqa
return d.itervalues()

def nextfun(it): # noqa
return it.next

def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")

exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")

BytesIO = WhateverIO = StringIO # noqa


def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""Class decorator to set metaclass.

Works with both Python 2 and Python 3 and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).

"""

def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)

return _clone_with_metaclass
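# Editor's sketch (illustrative only): with_metaclass as a class decorator.
if __name__ == '__main__':  # pragma: no cover - example usage
    class Meta(type):
        pass

    @with_metaclass(Meta)
    class Example(object):
        pass

    assert type(Example) is Meta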

+ 580
- 0
thesisenv/lib/python3.6/site-packages/billiard/forking.py

@@ -0,0 +1,580 @@
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

from __future__ import absolute_import

import os
import sys
import signal
import warnings

from pickle import load, HIGHEST_PROTOCOL
from billiard import util
from billiard import process
from billiard.five import int_types
from .reduction import dump
from .compat import _winapi as win32

__all__ = ['Popen', 'assert_spawning', 'exit',
'duplicate', 'close']

try:
WindowsError = WindowsError # noqa
except NameError:
class WindowsError(Exception): # noqa
pass

W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""

#
# Choose whether to do a fork or spawn (fork+exec) on Unix.
# This affects how some shared resources should be created.
#

_forking_is_enabled = sys.platform != 'win32'

#
# Check that the current thread is spawning a child process
#


def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)


#
# Unix
#

if sys.platform != 'win32':
try:
import thread
except ImportError:
import _thread as thread # noqa
import select

WINEXE = False
WINSERVICE = False

exit = os._exit
duplicate = os.dup
close = os.close
_select = util._eintr_retry(select.select)

#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#

class Popen(object):

_tls = thread._local()

def __init__(self, process_obj):
# register reducers
from billiard import connection # noqa
_Django_old_layout_hack__save()
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
r, w = os.pipe()
self.sentinel = r

if _forking_is_enabled:
self.pid = os.fork()
if self.pid == 0:
os.close(r)
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
os._exit(code)
else:
from_parent_fd, to_child_fd = os.pipe()
cmd = get_command_line() + [str(from_parent_fd)]

self.pid = os.fork()
if self.pid == 0:
os.close(r)
os.close(to_child_fd)
os.execv(sys.executable, cmd)

# send information to child
prep_data = get_preparation_data(process_obj._name)
os.close(from_parent_fd)
to_child = os.fdopen(to_child_fd, 'wb')
Popen._tls.process_handle = self.pid
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del(Popen._tls.process_handle)
to_child.close()

# `w` will be closed when the child exits, at which point `r`
# will become ready for reading (using e.g. select()).
os.close(w)
util.Finalize(self, os.close, (self.sentinel,))

def poll(self, flag=os.WNOHANG):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode

def wait(self, timeout=None):
if self.returncode is None:
if timeout is not None:
r = _select([self.sentinel], [], [], timeout)[0]
if not r:
return None
# This shouldn't block if select() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode

def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError:
if self.wait(timeout=0.1) is None:
raise

@staticmethod
def thread_is_spawning():
if _forking_is_enabled:
return False
else:
return getattr(Popen._tls, 'process_handle', None) is not None

@staticmethod
def duplicate_for_child(handle):
return handle

#
# Windows
#

else:
try:
import thread
except ImportError:
import _thread as thread # noqa
import msvcrt
try:
import _subprocess
except ImportError:
import _winapi as _subprocess # noqa

#
#
#

TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

exit = win32.ExitProcess
close = win32.CloseHandle

#
#
#

def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
h = _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
)
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and sys.version_info[1] < 3):
h = h.Detach()
return h

#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#

class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = thread._local()

def __init__(self, process_obj):
_Django_old_layout_hack__save()
# create pipe for communication with child
rfd, wfd = os.pipe()

# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)

# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
close(ht) if isinstance(ht, int_types) else ht.Close()
(close(rhandle) if isinstance(rhandle, int_types)
else rhandle.Close())

# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
self.sentinel = int(hp)

# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()

@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None

@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)

def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))

res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code

return self.returncode

def poll(self):
return self.wait(timeout=0)

def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise

#
#
#

if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable


def set_executable(exe):
global _python_exe
_python_exe = exe


def is_forking(argv):
'''
    Return whether the command line indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--billiard-fork':
assert len(argv) == 3
os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
return True
else:
return False


def freeze_support():
'''
    Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()


def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
if process.current_process()._identity == () and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.

        This probably means that you have forgotten to use the proper
idiom in the main module:

if __name__ == '__main__':
freeze_support()
...

The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')

if getattr(sys, 'frozen', False):
return [sys.executable, '--billiard-fork']
else:
prog = 'from billiard.forking import main; main()'
return [_python_exe, '-c', prog, '--billiard-fork']


def _Django_old_layout_hack__save():
if 'DJANGO_PROJECT_DIR' not in os.environ:
try:
settings_name = os.environ['DJANGO_SETTINGS_MODULE']
except KeyError:
return # not using Django.

conf_settings = sys.modules.get('django.conf.settings')
configured = conf_settings and conf_settings.configured
try:
project_name, _ = settings_name.split('.', 1)
except ValueError:
return # not modified by setup_environ

project = __import__(project_name)
try:
project_dir = os.path.normpath(_module_parent_dir(project))
except AttributeError:
return # dynamically generated module (no __file__)
if configured:
warnings.warn(UserWarning(
W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
))
os.environ['DJANGO_PROJECT_DIR'] = project_dir


def _Django_old_layout_hack__load():
try:
sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
except KeyError:
pass


def _module_parent_dir(mod):
dir, filename = os.path.split(_module_dir(mod))
if dir == os.curdir or not dir:
dir = os.getcwd()
return dir


def _module_dir(mod):
if '__init__.py' in mod.__file__:
return os.path.dirname(mod.__file__)
return mod.__file__


def main():
'''
    Run code specified by data received over pipe
'''
global _forking_is_enabled
_Django_old_layout_hack__load()

assert is_forking(sys.argv)
_forking_is_enabled = False

handle = int(sys.argv[-1])
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
else:
fd = handle
from_parent = os.fdopen(fd, 'rb')

process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
# Huge hack to make logging before Process.run work.
try:
os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
except KeyError:
pass
except AttributeError:
pass
loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
format = os.environ.get("_MP_FORK_LOGFORMAT_")
if loglevel:
from billiard import util
import logging
logger = util.get_logger()
logger.setLevel(int(loglevel))
if not logger.handlers:
logger._rudimentary_setup = True
logfile = logfile or sys.__stderr__
if hasattr(logfile, "write"):
handler = logging.StreamHandler(logfile)
else:
handler = logging.FileHandler(logfile)
formatter = logging.Formatter(
format or util.DEFAULT_LOGGING_FORMAT,
)
handler.setFormatter(formatter)
logger.addHandler(handler)

self = load(from_parent)
process.current_process()._inheriting = False

from_parent.close()

exitcode = self._bootstrap()
exit(exitcode)


def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
from billiard.util import _logger, _log_to_stderr

d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)

if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()

if not WINEXE and not WINSERVICE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)

return d

#
# Prepare current process
#

old_main_modules = []


def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])

if 'name' in data:
process.current_process().name = data['name']

if 'authkey' in data:
process.current_process()._authkey = data['authkey']

if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()

if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])

if 'sys_path' in data:
sys.path = data['sys_path']

if 'sys_argv' in data:
sys.argv = data['sys_argv']

if 'dir' in data:
os.chdir(data['dir'])

if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']

if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))

if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp

if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]

assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()

sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'

# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in list(main_module.__dict__.values()):
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass

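A minimal sketch of the entry-point idiom that the RuntimeError in get_command_line() above asks for. It assumes billiard's top-level API mirrors multiprocessing's (Process, freeze_support), which is what this vendored copy is designed to do; work() is an illustrative name.

    from billiard import Process, freeze_support

    def work(msg):
        # runs in the child process
        print('child says: %s' % msg)

    if __name__ == '__main__':
        freeze_support()  # no-op unless argv carries '--billiard-fork'
        p = Process(target=work, args=('hello',))
        p.start()
        p.join()
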
+ 255 - 0   thesisenv/lib/python3.6/site-packages/billiard/heap.py

@@ -0,0 +1,255 @@
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import bisect
import mmap
import os
import sys
import threading
import itertools

from ._ext import _billiard, win32
from .util import Finalize, info, get_temp_dir
from .forking import assert_spawning
from .reduction import ForkingPickler

__all__ = ['BufferWrapper']

try:
maxsize = sys.maxsize
except AttributeError:
maxsize = sys.maxint

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#

if sys.platform == 'win32':

class Arena(object):

_counter = itertools.count()

def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)

def __getstate__(self):
assert_spawning(self)
return self._state

def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS

else:

class Arena(object):

_counter = itertools.count()

def __init__(self, size, fileno=-1):
from .forking import _forking_is_enabled
self.size = size
self.fileno = fileno
if fileno == -1 and not _forking_is_enabled:
name = os.path.join(
get_temp_dir(),
'pym-%d-%d' % (os.getpid(), next(self._counter)))
self.fileno = os.open(
name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600)
os.unlink(name)
os.ftruncate(self.fileno, size)
self.buffer = mmap.mmap(self.fileno, self.size)

def reduce_arena(a):
if a.fileno == -1:
            raise ValueError('Arena is unpicklable because '
                             'forking was enabled when it was created')
return Arena, (a.size, a.fileno)

ForkingPickler.register(Arena, reduce_arena)

#
# Class allowing allocation of chunks of memory from arenas
#


class Heap(object):

_alignment = 8

def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
# list of pending blocks to free - see free() comment below
self._pending_free_blocks = []

@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask

def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]

(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block

def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block

try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)

try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)

block = (arena, start, stop)
length = stop - start

try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)

self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block

def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]

length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)

return start, stop

def _free_pending_blocks(self):
# Free all the blocks in the pending list - called with the lock held
while 1:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)

def free(self, block):
# free a block returned by malloc()
# Since free() can be called asynchronously by the GC, it could happen
# that it's called while self._lock is held: in that case,
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
# immediately, the block is added to a list of blocks to be freed
        # synchronously sometime later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under CPython it's atomic thanks
        # to the GIL).
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
# pending blocks to free
self._pending_free_blocks.append(block)
else:
# we hold the lock
try:
self._free_pending_blocks()
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()

def malloc(self, size):
# return a block of right size (possibly rounded up)
assert 0 <= size < maxsize
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
self._free_pending_blocks()
try:
size = self._roundup(max(size, 1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()

#
# Class representing a chunk of an mmap -- can be inherited
#


class BufferWrapper(object):

_heap = Heap()

def __init__(self, size):
assert 0 <= size < maxsize
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))

def get_address(self):
(arena, start, stop), size = self._state
address, length = _billiard.address_of_buffer(arena.buffer)
assert size <= length
return address + start

def get_size(self):
return self._state[1]

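A hedged sketch of how the BufferWrapper above is consumed (sharedctypes-style code allocates its shared memory this way). It assumes the compiled _billiard extension is importable, since get_address() relies on _billiard.address_of_buffer().

    from billiard.heap import BufferWrapper

    wrapper = BufferWrapper(64)   # reserve a 64-byte block on the shared heap
    print(wrapper.get_size())     # -> 64
    addr = wrapper.get_address()  # absolute address inside the backing mmap
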
+ 1169 - 0   thesisenv/lib/python3.6/site-packages/billiard/managers.py   (diff suppressed: file too large)

+ 1959 - 0   thesisenv/lib/python3.6/site-packages/billiard/pool.py   (diff suppressed: file too large)

+ 368 - 0   thesisenv/lib/python3.6/site-packages/billiard/process.py

@@ -0,0 +1,368 @@
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

#
# Imports
#

import os
import sys
import signal
import itertools
import binascii
import logging
import threading

from multiprocessing import process as _mproc

from .compat import bytes
try:
from _weakrefset import WeakSet
except ImportError:
WeakSet = None # noqa
from .five import items, string_t

try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None

__all__ = ['Process', 'current_process', 'active_children']

#
# Public functions
#


def current_process():
'''
Return process object representing the current process
'''
return _current_process


def _set_current_process(process):
global _current_process
_current_process = _mproc._current_process = process


def _cleanup():
# check for processes which have finished
if _current_process is not None:
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)


def _maybe_flush(f):
try:
f.flush()
except (AttributeError, EnvironmentError, NotImplementedError):
pass


def active_children(_cleanup=_cleanup):
'''
Return list of process objects corresponding to live child processes
'''
try:
_cleanup()
except TypeError:
# called after gc collect so _cleanup does not exist anymore
return []
if _current_process is not None:
return list(_current_process._children)
return []


class Process(object):
'''
Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
'''
_Popen = None

def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, daemon=None, **_kw):
assert group is None, 'group argument must be None for now'
count = next(_current_process._counter)
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._semprefix = _current_process._semprefix
self._unlinkfd = _current_process._unlinkfd
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = (
name or type(self).__name__ + '-' +
':'.join(str(i) for i in self._identity)
)
if _dangling is not None:
_dangling.add(self)

def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)

def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
self._sentinel = self._popen.sentinel
_current_process._children.add(self)

def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()

def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)

def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None

def _is_alive(self):
if self._popen is None:
return False
return self._popen.poll() is None

def _get_name(self):
return self._name

def _set_name(self, value):
        assert isinstance(value, string_t), 'name must be a string'
self._name = value
name = property(_get_name, _set_name)

def _get_daemon(self):
return self._daemonic

def _set_daemon(self, daemonic):
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
daemon = property(_get_daemon, _set_daemon)

def _get_authkey(self):
return self._authkey

def _set_authkey(self, authkey):
self._authkey = AuthenticationString(authkey)
authkey = property(_get_authkey, _set_authkey)

@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()

@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid

pid = ident

@property
def sentinel(self):
'''
Return a file descriptor (Unix) or handle (Windows) suitable for
waiting for process termination.
'''
try:
return self._sentinel
except AttributeError:
raise ValueError("process not started")

def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'

if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)

return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')

##

def _bootstrap(self):
from . import util
global _current_process

try:
self._children = set()
self._counter = itertools.count(1)
if sys.stdin is not None:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
old_process = _current_process
_set_current_process(self)

# Re-init logging system.
# Workaround for http://bugs.python.org/issue6721/#msg140215
# Python logging module uses RLock() objects which are broken
# after fork. This can result in a deadlock (Celery Issue #496).
loggerDict = logging.Logger.manager.loggerDict
logger_names = list(loggerDict.keys())
logger_names.append(None) # for root logger
for name in logger_names:
if not name or not isinstance(loggerDict[name],
logging.PlaceHolder):
for handler in logging.getLogger(name).handlers:
handler.createLock()
logging._lock = threading.RLock()

try:
util._finalizer_registry.clear()
util._run_after_forkers()
finally:
# delay finalization of the old process object until after
# _run_after_forkers() is executed
del old_process
util.info('child process %s calling self.run()', self.pid)
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as exc:
if not exc.args:
exitcode = 1
elif isinstance(exc.args[0], int):
exitcode = exc.args[0]
else:
sys.stderr.write(str(exc.args[0]) + '\n')
_maybe_flush(sys.stderr)
exitcode = 0 if isinstance(exc.args[0], str) else 1
except:
exitcode = 1
if not util.error('Process %s', self.name, exc_info=True):
import traceback
sys.stderr.write('Process %s:\n' % self.name)
traceback.print_exc()
finally:
util.info('process %s exiting with exitcode %d',
self.pid, exitcode)
_maybe_flush(sys.stdout)
_maybe_flush(sys.stderr)
return exitcode

#
# We subclass bytes to avoid accidental transmission of auth keys over network
#


class AuthenticationString(bytes):

def __reduce__(self):
from .forking import Popen

if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons')
return AuthenticationString, (bytes(self),)

#
# Create object representing the main process
#


class _MainProcess(Process):

def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
self._semprefix = 'mp-' + binascii.hexlify(
os.urandom(4)).decode('ascii')
self._unlinkfd = None

_current_process = _MainProcess()
del _MainProcess

#
# Give names to some return codes
#

_exitcode_to_name = {}

for name, signum in items(signal.__dict__):
if name[:3] == 'SIG' and '_' not in name:
_exitcode_to_name[-signum] = name

_dangling = WeakSet() if WeakSet is not None else None

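For orientation, a minimal sketch of the Process API defined above, subclassing and overriding run() as its docstring suggests; Greeter is an illustrative name.

    from billiard import Process

    class Greeter(Process):
        def run(self):
            # executed in the child process
            print('hello from %s (pid %s)' % (self.name, self.pid))

    if __name__ == '__main__':
        g = Greeter()
        g.start()
        g.join()
        print(g.exitcode)  # -> 0 on a clean exit
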
+ 0 - 0   thesisenv/lib/python3.6/site-packages/billiard/py3/__init__.py   (empty file)

+ 965 - 0   thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py

@@ -0,0 +1,965 @@
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import io
import os
import sys
import select
import socket
import struct
import errno
import tempfile
import itertools

import _multiprocessing
from ..compat import setblocking
from ..exceptions import AuthenticationError, BufferTooShort
from ..five import monotonic
from ..util import get_temp_dir, Finalize, sub_debug
from ..reduction import ForkingPickler

try:
import _winapi

WAIT_OBJECT_0 = _winapi.WAIT_OBJECT_0
WAIT_TIMEOUT = _winapi.WAIT_TIMEOUT
INFINITE = _winapi.INFINITE
# if we got here, we seem to be running on Windows. Handle probably
# missing WAIT_ABANDONED_0 constant:
try:
WAIT_ABANDONED_0 = _winapi.WAIT_ABANDONED_0
    except AttributeError:
        # _winapi does not export this constant; fall back to
        # its documented value until it is exported
WAIT_ABANDONED_0 = 128
except ImportError:
if sys.platform == 'win32':
raise
_winapi = None

__all__ = ['Client', 'Listener', 'Pipe', 'wait']

#
#
#

BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']

if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
return monotonic() + timeout


def _check_timeout(t):
return monotonic() > t


def arbitrary_address(family):
'''
Return an arbitrary free address for the given family
'''
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), next(_mmap_counter)))
else:
raise ValueError('unrecognized family')


def _validate_family(family):
'''
Checks if the family is valid for the current environment.
'''
if sys.platform != 'win32' and family == 'AF_PIPE':
raise ValueError('Family %s is not recognized.' % family)

if sys.platform == 'win32' and family == 'AF_UNIX':
# double check
if not hasattr(socket, family):
raise ValueError('Family %s is not recognized.' % family)


def address_type(address):
'''
    Return the type of the address

This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
'''
if type(address) == tuple:
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
elif type(address) is str:
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)

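# For reference, the classification above behaves as follows
# (illustrative values):
#   address_type(('localhost', 0))      -> 'AF_INET'  (a tuple)
#   address_type('/tmp/listener-abc')   -> 'AF_UNIX'  (a plain string)
#   address_type(r'\\.\pipe\pyc-1-2-')  -> 'AF_PIPE'  (string starting with \\)
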
#
# Connection classes
#


class _ConnectionBase:
_handle = None

def __init__(self, handle, readable=True, writable=True):
handle = handle.__index__()
if handle < 0:
raise ValueError("invalid handle")
if not readable and not writable:
raise ValueError(
"at least one of `readable` and `writable` must be True")
self._handle = handle
self._readable = readable
self._writable = writable

# XXX should we use util.Finalize instead of a __del__?

def __del__(self):
if self._handle is not None:
self._close()

def _check_closed(self):
if self._handle is None:
raise OSError("handle is closed")

def _check_readable(self):
if not self._readable:
raise OSError("connection is write-only")

def _check_writable(self):
if not self._writable:
raise OSError("connection is read-only")

def _bad_message_length(self):
if self._writable:
self._readable = False
else:
self.close()
raise OSError("bad message length")

@property
def closed(self):
"""True if the connection is closed"""
return self._handle is None

@property
def readable(self):
"""True if the connection is readable"""
return self._readable

@property
def writable(self):
"""True if the connection is writable"""
return self._writable

def fileno(self):
"""File descriptor or handle of the connection"""
self._check_closed()
return self._handle

def close(self):
"""Close the connection"""
if self._handle is not None:
try:
self._close()
finally:
self._handle = None

def send_bytes(self, buf, offset=0, size=None):
"""Send the bytes data from a bytes-like object"""
self._check_closed()
self._check_writable()
m = memoryview(buf)
# HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
if m.itemsize > 1:
m = memoryview(bytes(m))
n = len(m)
if offset < 0:
raise ValueError("offset is negative")
if n < offset:
raise ValueError("buffer length < offset")
if size is None:
size = n - offset
elif size < 0:
raise ValueError("size is negative")
elif offset + size > n:
raise ValueError("buffer length < offset + size")
self._send_bytes(m[offset:offset + size])

def send(self, obj):
"""Send a (picklable) object"""
self._check_closed()
self._check_writable()
self._send_bytes(ForkingPickler.dumps(obj))

def recv_bytes(self, maxlength=None):
"""
Receive bytes data as a bytes object.
"""
self._check_closed()
self._check_readable()
if maxlength is not None and maxlength < 0:
raise ValueError("negative maxlength")
buf = self._recv_bytes(maxlength)
if buf is None:
self._bad_message_length()
return buf.getvalue()

def recv_bytes_into(self, buf, offset=0):
"""
Receive bytes data into a writeable buffer-like object.
Return the number of bytes read.
"""
self._check_closed()
self._check_readable()
with memoryview(buf) as m:
# Get bytesize of arbitrary buffer
itemsize = m.itemsize
bytesize = itemsize * len(m)
if offset < 0:
raise ValueError("negative offset")
elif offset > bytesize:
raise ValueError("offset too large")
result = self._recv_bytes()
size = result.tell()
if bytesize < offset + size:
raise BufferTooShort(result.getvalue())
# Message can fit in dest
result.seek(0)
result.readinto(
m[offset // itemsize:(offset + size) // itemsize]
)
return size

def recv_payload(self):
return self._recv_bytes().getbuffer()

def recv(self):
"""Receive a (picklable) object"""
self._check_closed()
self._check_readable()
buf = self._recv_bytes()
return ForkingPickler.loads(buf.getbuffer())

def poll(self, timeout=0.0):
"""Whether there is any input available to be read"""
self._check_closed()
self._check_readable()
return self._poll(timeout)

def __enter__(self):
return self

def __exit__(self, exc_type, exc_value, exc_tb):
self.close()


if _winapi:

class PipeConnection(_ConnectionBase):
"""
Connection class based on a Windows named pipe.
Overlapped I/O is used, so the handles must have been created
with FILE_FLAG_OVERLAPPED.
"""
_got_empty_message = False

def _close(self, _CloseHandle=_winapi.CloseHandle):
_CloseHandle(self._handle)

def _send_bytes(self, buf):
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nwritten, err = ov.GetOverlappedResult(True)
assert err == 0
assert nwritten == len(buf)

def _recv_bytes(self, maxsize=None):
if self._got_empty_message:
self._got_empty_message = False
return io.BytesIO()
else:
bsize = 128 if maxsize is None else min(maxsize, 128)
try:
ov, err = _winapi.ReadFile(self._handle, bsize,
overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nread, err = ov.GetOverlappedResult(True)
if err == 0:
f = io.BytesIO()
f.write(ov.getbuffer())
return f
elif err == _winapi.ERROR_MORE_DATA:
return self._get_more_data(ov, maxsize)
except OSError as e:
if e.winerror == _winapi.ERROR_BROKEN_PIPE:
raise EOFError
else:
raise
raise RuntimeError(
"shouldn't get here; expected KeyboardInterrupt"
)

def _poll(self, timeout):
if (self._got_empty_message or
_winapi.PeekNamedPipe(self._handle)[0] != 0):
return True
return bool(wait([self], timeout))

def _get_more_data(self, ov, maxsize):
buf = ov.getbuffer()
f = io.BytesIO()
f.write(buf)
left = _winapi.PeekNamedPipe(self._handle)[1]
assert left > 0
if maxsize is not None and len(buf) + left > maxsize:
self._bad_message_length()
ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
rbytes, err = ov.GetOverlappedResult(True)
assert err == 0
assert rbytes == left
f.write(ov.getbuffer())
return f


class Connection(_ConnectionBase):
"""
Connection class based on an arbitrary file descriptor (Unix only), or
a socket handle (Windows).
"""

if _winapi:
def _close(self, _close=_multiprocessing.closesocket):
_close(self._handle)
_write = _multiprocessing.send
_read = _multiprocessing.recv
else:
def _close(self, _close=os.close): # noqa
_close(self._handle)
_write = os.write
_read = os.read

def send_offset(self, buf, offset, write=_write):
return write(self._handle, buf[offset:])

def _send(self, buf, write=_write):
remaining = len(buf)
while True:
try:
n = write(self._handle, buf)
except OSError as exc:
if exc.errno == errno.EINTR:
continue
raise
remaining -= n
if remaining == 0:
break
buf = buf[n:]

def setblocking(self, blocking):
setblocking(self._handle, blocking)

def _recv(self, size, read=_read):
buf = io.BytesIO()
handle = self._handle
remaining = size
while remaining > 0:
try:
chunk = read(handle, remaining)
except OSError as exc:
if exc.errno == errno.EINTR:
continue
raise
n = len(chunk)
if n == 0:
if remaining == size:
raise EOFError
else:
raise OSError("got end of file during message")
buf.write(chunk)
remaining -= n
return buf

def _send_bytes(self, buf):
# For wire compatibility with 3.2 and lower
n = len(buf)
self._send(struct.pack("!i", n))
# The condition is necessary to avoid "broken pipe" errors
# when sending a 0-length buffer if the other end closed the pipe.
if n > 0:
self._send(buf)

def _recv_bytes(self, maxsize=None):
buf = self._recv(4)
size, = struct.unpack("!i", buf.getvalue())
if maxsize is not None and size > maxsize:
return None
return self._recv(size)

def _poll(self, timeout):
r = wait([self], timeout)
return bool(r)

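# The wire format used by _send_bytes()/_recv_bytes() above is a 4-byte
# big-endian signed length followed by the payload, e.g.
#   struct.pack('!i', 5) + b'hello'  ->  b'\x00\x00\x00\x05hello'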

#
# Public functions
#

class Listener(object):
'''
Returns a listener object.

This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = (family or (address and address_type(address)) or
default_family)
address = address or arbitrary_address(family)

_validate_family(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)

if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')

self._authkey = authkey

def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.

Returns a `Connection` object.
'''
if self._listener is None:
raise OSError('listener is closed')
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c

def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
if self._listener is not None:
self._listener.close()
self._listener = None

address = property(lambda self: self._listener._address)
last_accepted = property(lambda self: self._listener._last_accepted)

def __enter__(self):
return self

def __exit__(self, exc_type, exc_value, exc_tb):
self.close()


def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
_validate_family(family)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)

if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')

if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)

return c


if sys.platform != 'win32':

def Pipe(duplex=True, rnonblock=False, wnonblock=False):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
s1.setblocking(not rnonblock)
s2.setblocking(not wnonblock)
c1 = Connection(s1.detach())
c2 = Connection(s2.detach())
else:
fd1, fd2 = os.pipe()
if rnonblock:
setblocking(fd1, 0)
if wnonblock:
setblocking(fd2, 0)
c1 = Connection(fd1, writable=False)
c2 = Connection(fd2, readable=False)

return c1, c2

else:
from billiard.forking import duplicate

def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE

h1 = _winapi.CreateNamedPipe(
address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
_winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
_winapi.SetNamedPipeHandleState(
h2, _winapi.PIPE_READMODE_MESSAGE, None, None
)

overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
_, err = overlapped.GetOverlappedResult(True)
assert err == 0

c1 = PipeConnection(duplicate(h1, inheritable=True), writable=duplex)
c2 = PipeConnection(duplicate(h2, inheritable=True), readable=duplex)
_winapi.CloseHandle(h1)
_winapi.CloseHandle(h2)
return c1, c2

#
# Definitions for connections based on sockets
#


class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
try:
# SO_REUSEADDR has different semantics on Windows (issue #2550).
if os.name == 'posix':
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self._socket.setblocking(True)
self._socket.bind(address)
self._socket.listen(backlog)
self._address = self._socket.getsockname()
except OSError:
self._socket.close()
raise
self._family = family
self._last_accepted = None

if family == 'AF_UNIX':
self._unlink = Finalize(
self, os.unlink, args=(address, ), exitpriority=0
)
else:
self._unlink = None

def accept(self):
while True:
try:
s, self._last_accepted = self._socket.accept()
except OSError as exc:
if exc.errno == errno.EINTR:
continue
raise
else:
break
s.setblocking(True)
return Connection(s.detach())

def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()


def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
with socket.socket(getattr(socket, family)) as s:
s.setblocking(True)
s.connect(address)
return Connection(s.detach())

#
# Definitions for connections based on named pipes
#

if sys.platform == 'win32':

class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
self._handle_queue = [self._new_handle(first=True)]

self._last_accepted = None
sub_debug('listener created with address=%r', self._address)
self.close = Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)

def _new_handle(self, first=False):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
return _winapi.CreateNamedPipe(
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
_winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)

def accept(self):
self._handle_queue.append(self._new_handle())
handle = self._handle_queue.pop(0)
try:
ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
except OSError as e:
if e.winerror != _winapi.ERROR_NO_DATA:
raise
# ERROR_NO_DATA can occur if a client has already connected,
# written data and then disconnected -- see Issue 14725.
else:
try:
_winapi.WaitForMultipleObjects([ov.event], False, INFINITE)
except:
ov.cancel()
_winapi.CloseHandle(handle)
raise
finally:
_, err = ov.GetOverlappedResult(True)
assert err == 0
return PipeConnection(handle)

@staticmethod
def _finalize_pipe_listener(queue, address):
sub_debug('closing listener with address=%r', address)
for handle in queue:
_winapi.CloseHandle(handle)

def PipeClient(address,
errors=(_winapi.ERROR_SEM_TIMEOUT,
_winapi.ERROR_PIPE_BUSY)):
'''
Return a connection object connected to the pipe given by `address`
'''
t = _init_timeout()
while 1:
try:
_winapi.WaitNamedPipe(address, 1000)
h = _winapi.CreateFile(
address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
except OSError as e:
if e.winerror not in errors or _check_timeout(t):
raise
else:
break
else:
raise

_winapi.SetNamedPipeHandleState(
h, _winapi.PIPE_READMODE_MESSAGE, None, None
)
return PipeConnection(h)

#
# Authentication stuff
#

MESSAGE_LENGTH = 20

CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'


def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
digest = hmac.new(authkey, message).digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
else:
connection.send_bytes(FAILURE)
raise AuthenticationError('digest received was wrong')


def answer_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
digest = hmac.new(authkey, message).digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
raise AuthenticationError('digest sent was rejected')

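# A hedged sketch of the handshake above, run over a duplex Pipe with a
# thread standing in for the peer process (Python < 3.8 only, since
# hmac.new() is called without a digestmod here):
#
#   import threading
#   parent, child = Pipe()
#   t = threading.Thread(target=answer_challenge, args=(child, b'key'))
#   t.start()
#   deliver_challenge(parent, b'key')  # AuthenticationError on mismatch
#   t.join()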
#
# Support for using xmlrpclib for serialization
#


class ConnectionWrapper(object):

def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)

def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)

def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)


def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') # noqa


def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf-8')) # noqa
return obj


class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpc.client as xmlrpclib # noqa
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)


def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpc.client as xmlrpclib # noqa
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)

#
# Wait
#

if sys.platform == 'win32':

def _exhaustive_wait(handles, timeout):
# Return ALL handles which are currently signalled. (Only
# returning the first signalled might create starvation issues.)
L = list(handles)
ready = []
while L:
res = _winapi.WaitForMultipleObjects(L, False, timeout)
if res == WAIT_TIMEOUT:
break
elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
res -= WAIT_OBJECT_0
elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
res -= WAIT_ABANDONED_0
else:
raise RuntimeError('Should not get here')
ready.append(L[res])
L = L[res+1:]
timeout = 0
return ready

_ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}

def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.

Returns list of those objects in object_list which are ready/readable.
'''
if timeout is None:
timeout = INFINITE
elif timeout < 0:
timeout = 0
else:
timeout = int(timeout * 1000 + 0.5)

object_list = list(object_list)
waithandle_to_obj = {}
ov_list = []
ready_objects = set()
ready_handles = set()

try:
for o in object_list:
try:
fileno = getattr(o, 'fileno')
except AttributeError:
waithandle_to_obj[o.__index__()] = o
else:
# start an overlapped read of length zero
try:
ov, err = _winapi.ReadFile(fileno(), 0, True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err == _winapi.ERROR_IO_PENDING:
ov_list.append(ov)
waithandle_to_obj[ov.event] = o
else:
# If o.fileno() is an overlapped pipe handle and
# err == 0 then there is a zero length message
# in the pipe, but it HAS NOT been consumed.
ready_objects.add(o)
timeout = 0

ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
finally:
# request that overlapped reads stop
for ov in ov_list:
ov.cancel()

# wait for all overlapped reads to stop
for ov in ov_list:
try:
_, err = ov.GetOverlappedResult(True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err != _winapi.ERROR_OPERATION_ABORTED:
o = waithandle_to_obj[ov.event]
ready_objects.add(o)
if err == 0:
# If o.fileno() is an overlapped pipe handle then
# a zero length message HAS been consumed.
if hasattr(o, '_got_empty_message'):
o._got_empty_message = True

ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
return [oj for oj in object_list if oj in ready_objects]

else:

if hasattr(select, 'poll'):
def _poll(fds, timeout):
if timeout is not None:
timeout = int(timeout * 1000) # timeout is in milliseconds
fd_map = {}
pollster = select.poll()
for fd in fds:
pollster.register(fd, select.POLLIN)
if hasattr(fd, 'fileno'):
fd_map[fd.fileno()] = fd
else:
fd_map[fd] = fd
ls = []
for fd, event in pollster.poll(timeout):
if event & select.POLLNVAL:
raise ValueError('invalid file descriptor %i' % fd)
ls.append(fd_map[fd])
return ls
else:
def _poll(fds, timeout): # noqa
return select.select(fds, [], [], timeout)[0]

def wait(object_list, timeout=None): # noqa
'''
Wait till an object in object_list is ready/readable.

Returns list of those objects in object_list which are ready/readable.
'''
if timeout is not None:
if timeout <= 0:
return _poll(object_list, 0)
else:
deadline = monotonic() + timeout
while True:
try:
return _poll(object_list, timeout)
except OSError as e:
if e.errno != errno.EINTR:
raise
if timeout is not None:
timeout = deadline - monotonic()

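A short sketch of the Pipe and wait() helpers defined above; values are illustrative.

    from billiard.py3.connection import Pipe, wait

    r, w = Pipe(duplex=False)
    w.send({'status': 'ok'})
    ready = wait([r], timeout=1.0)  # -> [r] once the payload is buffered
    if ready:
        print(r.recv())             # -> {'status': 'ok'}
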
+ 249 - 0   thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py

@@ -0,0 +1,249 @@
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import copyreg
import functools
import io
import os
import pickle
import socket
import sys

__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']


HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#


class ForkingPickler(pickle.Pickler):
'''Pickler subclass used by multiprocessing.'''
_extra_reducers = {}
_copyreg_dispatch_table = copyreg.dispatch_table

def __init__(self, *args):
super().__init__(*args)
self.dispatch_table = self._copyreg_dispatch_table.copy()
self.dispatch_table.update(self._extra_reducers)

@classmethod
def register(cls, type, reduce):
'''Register a reduce function for a type.'''
cls._extra_reducers[type] = reduce

@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
return buf.getbuffer()

loads = pickle.loads

register = ForkingPickler.register


def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
ForkingPickler(file, protocol).dump(obj)

#
# Platform specific definitions
#

if sys.platform == 'win32':
# Windows
__all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi

def duplicate(handle, target_process=None, inheritable=False):
'''Duplicate a handle. (target_process is a handle not a pid!)'''
if target_process is None:
target_process = _winapi.GetCurrentProcess()
return _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle, target_process,
0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

def steal_handle(source_pid, handle):
'''Steal a handle from process identified by source_pid.'''
source_process_handle = _winapi.OpenProcess(
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
try:
return _winapi.DuplicateHandle(
source_process_handle, handle,
_winapi.GetCurrentProcess(), 0, False,
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(source_process_handle)

def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)

def recv_handle(conn):
'''Receive a handle over a local connection.'''
return conn.recv().detach()

class DupHandle(object):
'''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
if pid is None:
# We just duplicate the handle in the current process and
# let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid

def detach(self):
'''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
# The handle has already been duplicated for this process.
return self._handle
# We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)

else:
# Unix
__all__ += ['DupFd', 'sendfds', 'recvfds']
import array

# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'

def sendfds(sock, fds):
'''Send an array of fds over an AF_UNIX socket.'''
fds = array.array('i', fds)
msg = bytes([len(fds) % 256])
sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
if ACKNOWLEDGE and sock.recv(1) != b'A':
raise RuntimeError('did not receive acknowledgement of fd')

def recvfds(sock, size):
'''Receive an array of fds over an AF_UNIX socket.'''
a = array.array('i')
bytes_size = a.itemsize * size
msg, ancdata, flags, addr = sock.recvmsg(
1, socket.CMSG_LEN(bytes_size),
)
if not msg and not ancdata:
raise EOFError
try:
if ACKNOWLEDGE:
sock.send(b'A')
if len(ancdata) != 1:
raise RuntimeError(
'received %d items of ancdata' % len(ancdata),
)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
if len(cmsg_data) % a.itemsize != 0:
raise ValueError
a.frombytes(cmsg_data)
assert len(a) % 256 == msg[0]
return list(a)
except (ValueError, IndexError):
pass
raise RuntimeError('Invalid data received')

def send_handle(conn, handle, destination_pid): # noqa
'''Send a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
sendfds(s, [handle])

def recv_handle(conn): # noqa
'''Receive a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
return recvfds(s, 1)[0]

def DupFd(fd):
'''Return a wrapper for an fd.'''
from ..forking import Popen
return Popen.duplicate_for_child(fd)

#
# Try making some callable types picklable
#


def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)


class _C:
def f(self):
pass
register(type(_C().f), _reduce_method)


def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)


def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})


def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)

#
# Make sockets picklable
#

if sys.platform == 'win32':

def _reduce_socket(s):
from ..resource_sharer import DupSocket
return _rebuild_socket, (DupSocket(s),)

def _rebuild_socket(ds):
return ds.detach()
register(socket.socket, _reduce_socket)

else:

def _reduce_socket(s): # noqa
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)

def _rebuild_socket(df, family, type, proto): # noqa
fd = df.detach()
return socket.socket(family, type, proto, fileno=fd)
register(socket.socket, _reduce_socket)

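A minimal round trip through the ForkingPickler defined above: dumps() returns a memoryview over the pickled bytes, loads() is plain pickle.loads, and dump() is the module-level helper.

    import io
    from billiard.py3.reduction import ForkingPickler, dump

    payload = ForkingPickler.dumps({'answer': 42})  # memoryview of pickle data
    print(ForkingPickler.loads(payload))            # -> {'answer': 42}

    buf = io.BytesIO()
    dump(['a', 'b'], buf)  # pickled into buf with ForkingPickler
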
+ 372 - 0   thesisenv/lib/python3.6/site-packages/billiard/queues.py

@@ -0,0 +1,372 @@
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import sys
import os
import threading
import collections
import weakref
import errno

from . import Pipe
from ._ext import _billiard
from .compat import get_errno
from .five import monotonic
from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from .util import debug, error, info, Finalize, register_after_fork
from .five import Empty, Full
from .forking import assert_spawning

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']


class Queue(object):
'''
Queue type using a pipe, buffer and thread
'''
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _billiard.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False

self._after_fork()

if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)

def __getstate__(self):
assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)

def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()

def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll

def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full

with self._notempty:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()

def get(self, block=True, timeout=None):
if block and timeout is None:
with self._rlock:
res = self._recv()
self._sem.release()
return res

else:
if block:
deadline = monotonic() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - monotonic()
if timeout < 0 or not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()

def qsize(self):
# Raises NotImplementedError on Mac OSX because
# of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()

def empty(self):
return not self._poll()

def full(self):
return self._sem._semlock._is_zero()

def get_nowait(self):
return self.get(False)

def put_nowait(self, obj):
return self.put(obj, False)

def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()

def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()

def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass

def _start_thread(self):
debug('Queue._start_thread()')

# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send,
self._wlock, self._writer.close, self._ignore_epipe),
name='QueueFeederThread'
)
self._thread.daemon = True

debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')

# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)

# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)

@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')

@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
with notempty:
buffer.append(_sentinel)
notempty.notify()

@staticmethod
def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe')
from .util import is_exiting

ncond = notempty
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wlock = writelock
else:
wlock = None

try:
while 1:
with ncond:
if not buffer:
nwait()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return

if wlock is None:
send(obj)
else:
with wlock:
send(obj)
except IndexError:
pass
except Exception as exc:
if ignore_epipe and get_errno(exc) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
# may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to clean up.
try:
if is_exiting():
info('error in queue thread: %r', exc, exc_info=True)
else:
if not error('error in queue thread: %r', exc,
exc_info=True):
import traceback
traceback.print_exc()
except Exception:
pass

_sentinel = object()


class JoinableQueue(Queue):
'''
A queue type which also supports join() and task_done() methods

Note that if you do not call task_done() for each finished task then
eventually the counter's semaphore may overflow causing Bad Things
to happen.
'''

def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()

def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]

def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full

with self._notempty:
with self._cond:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()

def task_done(self):
with self._cond:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()

def join(self):
with self._cond:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()


class _SimpleQueue(object):
'''
Simplified Queue type -- really just a locked pipe
'''

def __init__(self, rnonblock=False, wnonblock=False):
self._reader, self._writer = Pipe(
duplex=False, rnonblock=rnonblock, wnonblock=wnonblock,
)
self._poll = self._reader.poll
self._rlock = self._wlock = None
self._make_methods()

def empty(self):
return not self._poll()

def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)

def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
self._make_methods()

def _make_methods(self):
recv = self._reader.recv
try:
recv_payload = self._reader.recv_payload
except AttributeError:
recv_payload = self._reader.recv_bytes
rlock = self._rlock

if rlock is not None:
def get():
with rlock:
return recv()
self.get = get

def get_payload():
with rlock:
return recv_payload()
self.get_payload = get_payload
else:
self.get = recv
self.get_payload = recv_payload

if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = self._writer.send
else:
send = self._writer.send
wlock = self._wlock

def put(obj):
with wlock:
return send(obj)
self.put = put


class SimpleQueue(_SimpleQueue):

def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._wlock = Lock() if sys.platform != 'win32' else None
self._make_methods()
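
For orientation, here is a minimal usage sketch of the Queue type defined
above (a sketch only; it assumes billiard exposes the multiprocessing-style
top-level API, which this version does)::

    from billiard import Process, Queue

    def worker(q):
        q.put('hello from child')

    if __name__ == '__main__':
        q = Queue(maxsize=10)
        p = Process(target=worker, args=(q,))
        p.start()
        print(q.get())       # blocks until the child has put an item
        p.join()
        q.close()
        q.join_thread()      # wait for the feeder thread shown above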

+ 10
- 0
thesisenv/lib/python3.6/site-packages/billiard/reduction.py View File

@@ -0,0 +1,10 @@
from __future__ import absolute_import

import sys

if sys.version_info[0] == 3:
from .py3 import reduction
else:
from .py2 import reduction # noqa

sys.modules[__name__] = reduction
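
The last line replaces the half-initialised module in ``sys.modules`` so
that ``import billiard.reduction`` yields the version-specific
implementation directly. A minimal sketch of the same idiom, with
hypothetical module names::

    # facade.py -- hypothetical illustration of the sys.modules swap above
    import sys

    if sys.version_info[0] == 3:
        from mypkg import _impl_py3 as impl   # hypothetical backend module
    else:
        from mypkg import _impl_py2 as impl   # hypothetical backend module

    # From now on, `import mypkg.facade` returns `impl` directly.
    sys.modules[__name__] = impl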

+ 248
- 0
thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py View File

@@ -0,0 +1,248 @@
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import ctypes
import weakref

from . import heap, RLock
from .five import int_types
from .forking import assert_spawning
from .reduction import ForkingPickler

__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']

typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}


def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return rebuild_ctype(type_, wrapper, None)


def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj


def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int_types):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result


def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError(
'unrecognized keyword argument(s): %s' % list(kwds.keys()))
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)


def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError(
'unrecognized keyword argument(s): %s' % list(kwds.keys()))
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)


def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj


def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'

if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)

#
# Functions for pickling/unpickling
#


def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)


def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj

#
# Function to create properties
#


def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec(template % ((name, ) * 7), d)
prop_cache[name] = d[name]
return d[name]

template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''

prop_cache = {}
class_cache = weakref.WeakKeyDictionary()

#
# Synchronized wrappers
#


class SynchronizedBase(object):

def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release

def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)

def get_obj(self):
return self._obj

def get_lock(self):
return self._lock

def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)


class Synchronized(SynchronizedBase):
value = make_property('value')


class SynchronizedArray(SynchronizedBase):

def __len__(self):
return len(self._obj)

def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()

def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()

def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()

def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()


class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
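
A short usage sketch for the API above (assuming ``billiard.sharedctypes``
is importable): ``Value`` and ``Array`` return synchronized wrappers, and
``get_lock()`` guards compound updates::

    from billiard.sharedctypes import Value, Array

    counter = Value('i', 0)          # synchronized c_int; RLock created implicitly
    samples = Array('d', [0.0] * 4)  # synchronized array of doubles

    with counter.get_lock():         # guard the read-modify-write as one unit
        counter.value += 1

    samples[0] = 3.14                # element access locks internally
    print(counter.value, samples[:])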

+ 449
- 0
thesisenv/lib/python3.6/site-packages/billiard/synchronize.py View File

@@ -0,0 +1,449 @@
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import itertools
import os
import signal
import sys
import threading


from ._ext import _billiard, ensure_SemLock
from .five import range, monotonic
from .process import current_process
from .util import Finalize, register_after_fork, debug
from .forking import assert_spawning, Popen
from .compat import bytes, closerange

__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event',
]

# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
ensure_SemLock()

#
# Constants
#

RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX

try:
sem_unlink = _billiard.SemLock.sem_unlink
except AttributeError: # pragma: no cover
try:
# Py3.4+ implements sem_unlink and the semaphore must be named
from _multiprocessing import sem_unlink # noqa
except ImportError:
sem_unlink = None # noqa

#
# Base class for semaphores and mutexes; wraps `_billiard.SemLock`
#


def _semname(sl):
try:
return sl.name
except AttributeError:
pass


class SemLock(object):
_counter = itertools.count()

def __init__(self, kind, value, maxvalue):
from .forking import _forking_is_enabled
unlink_immediately = _forking_is_enabled or sys.platform == 'win32'
if sem_unlink:
sl = self._semlock = _billiard.SemLock(
kind, value, maxvalue, self._make_name(), unlink_immediately)
else:
sl = self._semlock = _billiard.SemLock(kind, value, maxvalue)

debug('created semlock with handle %s', sl.handle)
self._make_methods()

if sem_unlink:

if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
register_after_fork(self, _after_fork)

if _semname(self._semlock) is not None:
# We only get here if we are on Unix with forking
# disabled. When the object is garbage collected or the
# process shuts down we unlink the semaphore name
Finalize(self, sem_unlink, (self._semlock.name,),
exitpriority=0)
# In case of abnormal termination unlink semaphore name
_cleanup_semaphore_if_leaked(self._semlock.name)

def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release

def __enter__(self):
return self._semlock.__enter__()

def __exit__(self, *args):
return self._semlock.__exit__(*args)

def __getstate__(self):
assert_spawning(self)
sl = self._semlock
state = (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
try:
state += (sl.name, )
except AttributeError:
pass
return state

def __setstate__(self, state):
self._semlock = _billiard.SemLock._rebuild(*state)
debug('recreated blocker with handle %r', state[0])
self._make_methods()

@staticmethod
def _make_name():
return '/%s-%s-%s' % (current_process()._semprefix,
os.getpid(), next(SemLock._counter))


class Semaphore(SemLock):

def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

def get_value(self):
return self._semlock._get_value()

def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<Semaphore(value=%s)>' % value


class BoundedSemaphore(Semaphore):

def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, value)

def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
(value, self._semlock.maxvalue)


class Lock(SemLock):
'''
Non-recursive lock.
'''

def __init__(self):
SemLock.__init__(self, SEMAPHORE, 1, 1)

def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().name
if threading.currentThread().name != 'MainThread':
name += '|' + threading.currentThread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<Lock(owner=%s)>' % name


class RLock(SemLock):
'''
Recursive lock
'''

def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)

def __repr__(self):
try:
if self._semlock._is_mine():
name = current_process().name
if threading.currentThread().name != 'MainThread':
name += '|' + threading.currentThread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<RLock(%s, %s)>' % (name, count)


class Condition(object):
'''
Condition variable
'''

def __init__(self, lock=None):
self._lock = lock or RLock()
self._sleeping_count = Semaphore(0)
self._woken_count = Semaphore(0)
self._wait_semaphore = Semaphore(0)
self._make_methods()

def __getstate__(self):
assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)

def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()

def __enter__(self):
return self._lock.__enter__()

def __exit__(self, *args):
return self._lock.__exit__(*args)

def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release

def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<Condition(%s, %s)>' % (self._lock, num_waiters)

def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'

# indicate that this thread is going to sleep
self._sleeping_count.release()

# release lock
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()

try:
# wait for notification or timeout
ret = self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()

# reacquire lock
for i in range(count):
self._lock.acquire()
return ret

def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)

# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res

if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for sleeper to wake

# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)

def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)

# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res

sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1

if sleepers:
for i in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake

# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass

def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result


class Event(object):

def __init__(self):
self._cond = Condition(Lock())
self._flag = Semaphore(0)

def is_set(self):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
return True
return False
finally:
self._cond.release()

def set(self):
self._cond.acquire()
try:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
finally:
self._cond.release()

def clear(self):
self._cond.acquire()
try:
self._flag.acquire(False)
finally:
self._cond.release()

def wait(self, timeout=None):
self._cond.acquire()
try:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)

if self._flag.acquire(False):
self._flag.release()
return True
return False
finally:
self._cond.release()


if sys.platform != 'win32':
#
# Protection against unlinked semaphores if the program ends abnormally
# and forking has been disabled.
#

def _cleanup_semaphore_if_leaked(name):
name = name.encode('ascii') + bytes('\0', 'ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
fd = _get_unlinkfd()
bits = os.write(fd, name)
assert bits == len(name)

def _get_unlinkfd():
cp = current_process()
if cp._unlinkfd is None:
r, w = os.pipe()
pid = os.fork()
if pid == 0:
try:
from setproctitle import setproctitle
setproctitle("[sem_cleanup for %r]" % cp.pid)
except:
pass

# Fork a process which will survive until all other processes
# which have a copy of the write end of the pipe have exited.
# The forked process just collects names of semaphores until
# EOF is indicated. Then it tries unlinking all the names it
# has collected.
_collect_names_then_unlink(r)
os._exit(0)
os.close(r)
cp._unlinkfd = w
return cp._unlinkfd

def _collect_names_then_unlink(r):
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)

# close all fds except r
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
closerange(0, r)
closerange(r + 1, MAXFD)

# collect data written to pipe
data = []
while 1:
try:
s = os.read(r, 512)
except:
# XXX IO lock might be held at fork, so don't try
# printing unexpected exception - see issue 6721
pass
else:
if not s:
break
data.append(s)

# attempt to unlink each collected name
for name in bytes('', 'ascii').join(data).split(bytes('\0', 'ascii')):
try:
sem_unlink(name.decode('ascii'))
except:
# XXX IO lock might be held at fork, so don't try
# printing unexpected exception - see issue 6721
pass
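
For context, a sketch of the ``Event`` primitive defined above coordinating
a parent and a child process (assuming billiard mirrors the multiprocessing
top-level API, as this version does)::

    from billiard import Process, Event

    def waiter(ev):
        ev.wait()                    # blocks until set() is called
        print('event observed')

    if __name__ == '__main__':
        ev = Event()
        p = Process(target=waiter, args=(ev,))
        p.start()
        ev.set()                     # wakes the waiting process
        p.join()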

+ 21
- 0
thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py View File

@@ -0,0 +1,21 @@
from __future__ import absolute_import

import atexit


def teardown():
# Workaround for multiprocessing bug where logging
# is attempted after globals have already been collected at shutdown.
cancelled = set()
try:
import multiprocessing.util
cancelled.add(multiprocessing.util._exit_function)
except (AttributeError, ImportError):
pass

try:
atexit._exithandlers[:] = [
e for e in atexit._exithandlers if e[0] not in cancelled
]
except AttributeError:
pass

+ 85
- 0
thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py View File

@@ -0,0 +1,85 @@
from __future__ import absolute_import

import sys


class WarningMessage(object):

"""Holds the result of a single showwarning() call."""

_WARNING_DETAILS = ('message', 'category', 'filename', 'lineno', 'file',
'line')

def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])

self._category_name = category and category.__name__ or None

def __str__(self):
return ('{message : %r, category : %r, filename : %r, lineno : %s, '
'line : %r}' % (self.message, self._category_name,
self.filename, self.lineno, self.line))


class catch_warnings(object):

"""A context manager that copies and restores the warnings filter upon
exiting the context.

The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().

The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only
useful when testing the warnings module itself.

"""

def __init__(self, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].

For compatibility with Python 3.0, please consider all arguments to be
keyword-only.

"""
self._record = record
self._module = module is None and sys.modules['warnings'] or module
self._entered = False

def __repr__(self):
args = []
if self._record:
args.append('record=True')
if self._module is not sys.modules['warnings']:
args.append('module=%r' % self._module)
name = type(self).__name__
return '%s(%s)' % (name, ', '.join(args))

def __enter__(self):
if self._entered:
raise RuntimeError('Cannot enter %r twice' % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []

def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))

self._module.showwarning = showwarning
return log

def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError('Cannot exit %r without entering first' % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
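
A brief usage sketch of this backported context manager in record mode::

    import warnings
    from billiard.tests.compat import catch_warnings

    with catch_warnings(record=True) as log:
        warnings.simplefilter('always')
        warnings.warn('something odd', UserWarning)

    assert len(log) == 1
    assert log[0].category is UserWarning   # WarningMessage attributes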

+ 108
- 0
thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py View File

@@ -0,0 +1,108 @@
from __future__ import absolute_import

import os
import signal
import sys

from contextlib import contextmanager
from time import time

from nose import SkipTest
from billiard.common import (
_shutdown_cleanup,
reset_signals,
restart_state,
)

from .utils import Case

try:
from unittest.mock import Mock, call, patch
except ImportError:
from mock import Mock, call, patch # noqa


def signo(name):
return getattr(signal, name)


@contextmanager
def termsigs(default, full):
from billiard import common
prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default
prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full
try:
yield
finally:
common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full


class test_reset_signals(Case):

def setUp(self):
if sys.platform == 'win32':
raise SkipTest('win32: skip')

def test_shutdown_handler(self):
with patch('sys.exit') as exit:
_shutdown_cleanup(15, Mock())
self.assertTrue(exit.called)
self.assertEqual(os.WTERMSIG(exit.call_args[0][0]), 15)

def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']):
with self.assert_context(sigs, [], signal.SIG_IGN) as (_, SET):
self.assertFalse(SET.called)

def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']):
with self.assert_context(sigs, [], None) as (_, SET):
self.assertFalse(SET.called)

def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET):
SET.assert_has_calls([
call(signo(sig), _shutdown_cleanup) for sig in sigs
])

def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
with self.assert_context(sigs, [], object()) as (_, SET):
SET.assert_has_calls([
call(signo(sig), _shutdown_cleanup) for sig in sigs
])

def test_handles_errors(self, sigs=['SIGTERM']):
for exc in (OSError(), AttributeError(),
ValueError(), RuntimeError()):
with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, S):
self.assertTrue(S.called)

@contextmanager
def assert_context(self, default, full, get_returns=None, set_effect=None):
with termsigs(default, full):
with patch('signal.getsignal') as GET:
with patch('signal.signal') as SET:
GET.return_value = get_returns
SET.side_effect = set_effect
reset_signals()
GET.assert_has_calls([
call(signo(sig)) for sig in default
])
yield GET, SET


class test_restart_state(Case):

def test_raises(self):
s = restart_state(100, 1) # max 100 restarts in 1 second.
s.R = 99
s.step()
with self.assertRaises(s.RestartFreqExceeded):
s.step()

def test_time_passed_resets_counter(self):
s = restart_state(100, 10)
s.R, s.T = 100, time()
with self.assertRaises(s.RestartFreqExceeded):
s.step()
s.R, s.T = 100, time()
s.step(time() + 20)
self.assertEqual(s.R, 1)

+ 12
- 0
thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py View File

@@ -0,0 +1,12 @@
from __future__ import absolute_import

import billiard

from .utils import Case


class test_billiard(Case):

def test_has_version(self):
self.assertTrue(billiard.__version__)
self.assertIsInstance(billiard.__version__, str)

+ 145
- 0
thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py View File

@@ -0,0 +1,145 @@
from __future__ import absolute_import

import re
import sys
import warnings

try:
import unittest # noqa
unittest.skip
from unittest.util import safe_repr, unorderable_list_difference
except AttributeError:
import unittest2 as unittest # noqa
from unittest2.util import safe_repr, unorderable_list_difference # noqa

from billiard.five import string_t, items, values

from .compat import catch_warnings

# -- adds assertWarns from recent unittest2, not in Python 2.7.


class _AssertRaisesBaseContext(object):

def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.failureException = test_case.failureException
self.obj_name = None
if isinstance(expected_regex, string_t):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex


class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""

def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
warnings.resetwarnings()
for v in values(sys.modules):
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter('always', self.expected)
return self

def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
raise self.failureException(
'%r does not match %r' % (
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
raise self.failureException(
'%s not triggered by %s' % (exc_name, self.obj_name))
else:
raise self.failureException('%s not triggered' % exc_name)


class Case(unittest.TestCase):

def assertWarns(self, expected_warning):
return _AssertWarnsContext(expected_warning, self, None)

def assertWarnsRegex(self, expected_warning, expected_regex):
return _AssertWarnsContext(expected_warning, self,
None, expected_regex)

def assertDictContainsSubset(self, expected, actual, msg=None):
missing, mismatched = [], []

for key, value in items(expected):
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' % (
safe_repr(key), safe_repr(value),
safe_repr(actual[key])))

if not (missing or mismatched):
return

standard_msg = ''
if missing:
standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing))

if mismatched:
if standard_msg:
standard_msg += '; '
standard_msg += 'Mismatched values: %s' % (
','.join(mismatched))

self.fail(self._formatMessage(msg, standard_msg))

def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
missing = unexpected = None
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)

errors = []
if missing:
errors.append(
'Expected, but missing:\n %s' % (safe_repr(missing), ),
)
if unexpected:
errors.append(
'Unexpected, but present:\n %s' % (safe_repr(unexpected), ),
)
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
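
A sketch of the added assertions in use, in a hypothetical test case::

    import warnings
    from billiard.tests.utils import Case

    class test_example(Case):

        def test_warns(self):
            with self.assertWarns(DeprecationWarning):
                warnings.warn('old api', DeprecationWarning)

        def test_subset(self):
            self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})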

+ 152
- 0
thesisenv/lib/python3.6/site-packages/billiard/util.py View File

@@ -0,0 +1,152 @@
#
# Module providing various facilities to other parts of the package
#
# billiard/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import errno
import functools
import atexit

from multiprocessing.util import ( # noqa
_afterfork_registry,
_afterfork_counter,
_exit_function,
_finalizer_registry,
_finalizer_counter,
Finalize,
ForkAwareLocal,
ForkAwareThreadLock,
get_temp_dir,
is_exiting,
register_after_fork,
_run_after_forkers,
_run_finalizers,
)

from .compat import get_errno

__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]

#
# Logging
#

NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
ERROR = 40

LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

_logger = None
_log_to_stderr = False


def sub_debug(msg, *args, **kwargs):
if _logger:
_logger.log(SUBDEBUG, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
if _logger:
_logger.log(DEBUG, msg, *args, **kwargs)
return True
return False


def info(msg, *args, **kwargs):
if _logger:
_logger.log(INFO, msg, *args, **kwargs)
return True
return False


def sub_warning(msg, *args, **kwargs):
if _logger:
_logger.log(SUBWARNING, msg, *args, **kwargs)
return True
return False


def error(msg, *args, **kwargs):
if _logger:
_logger.log(ERROR, msg, *args, **kwargs)
return True
return False


def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging

logging._acquireLock()
try:
if not _logger:

_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')

# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()

return _logger


def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging

logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)

if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger


def _eintr_retry(func):
'''
Automatic retry after EINTR.
'''

@functools.wraps(func)
def wrapped(*args, **kwargs):
while 1:
try:
return func(*args, **kwargs)
except OSError as exc:
if get_errno(exc) != errno.EINTR:
raise
return wrapped
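
A sketch of the decorator in use, retrying an OS-level call that may be
interrupted by a signal::

    import os
    from billiard.util import _eintr_retry

    @_eintr_retry
    def read_some(fd, n):
        # os.read can fail with errno == EINTR if a signal arrives;
        # the wrapper above simply retries the call in that case.
        return os.read(fd, n)

    r, w = os.pipe()
    os.write(w, b'ping')
    print(read_some(r, 4))   # b'ping'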

+ 428
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst View File

@@ -0,0 +1,428 @@
=================================
celery - Distributed Task Queue
=================================

.. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png

:Version: 3.1.26 (Cipater)
:Web: http://celeryproject.org/
:Download: http://pypi.python.org/pypi/celery/
:Source: http://github.com/celery/celery/
:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis,
python, webhooks, queue, distributed

--

What is a Task Queue?
=====================

Task queues are used as a mechanism to distribute work across threads or
machines.

A task queue's input is a unit of work called a task. Dedicated worker
processes then constantly monitor the queue for new work to perform.

Celery communicates via messages, usually using a broker
to mediate between clients and workers. To initiate a task a client puts a
message on the queue, the broker then delivers the message to a worker.

A Celery system can consist of multiple workers and brokers, giving way
to high availability and horizontal scaling.

Celery is a library written in Python, but the protocol can be implemented in
any language. So far there's RCelery_ for the Ruby programming language, and a
`PHP client`_, but language interoperability can also be achieved
by `using webhooks`_.

.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/
.. _`PHP client`: https://github.com/gjedeer/celery-php
.. _`using webhooks`:
http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html

What do I need?
===============

Celery version 3.0 runs on,

- Python (2.5, 2.6, 2.7, 3.2, 3.3)
- PyPy (1.8, 1.9)
- Jython (2.5, 2.7).

This is the last version to support Python 2.5,
and from Celery 3.1, Python 2.6 or later is required.
The last version to support Python 2.4 was Celery series 2.2.

*Celery* is usually used with a message broker to send and receive messages.
The RabbitMQ and Redis transports are feature complete,
but there's also experimental support for a myriad of other solutions, including
using SQLite for local development.

*Celery* can run on a single machine, on multiple machines, or even
across datacenters.

Get Started
===========

If this is the first time you're trying to use Celery, or you are
new to Celery 3.0 coming from previous versions, then you should read our
getting started tutorials:

- `First steps with Celery`_

Tutorial teaching you the bare minimum needed to get started with Celery.

- `Next steps`_

A more complete overview, showing more features.

.. _`First steps with Celery`:
http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html

.. _`Next steps`:
http://docs.celeryproject.org/en/latest/getting-started/next-steps.html

Celery is...
============

- **Simple**

Celery is easy to use and maintain, and does *not need configuration files*.

It has an active, friendly community you can talk to for support,
including a `mailing-list`_ and an IRC channel.

Here's one of the simplest applications you can make (a sketch of calling it follows this list)::

from celery import Celery

app = Celery('hello', broker='amqp://guest@localhost//')

@app.task
def hello():
return 'hello world'

- **Highly Available**

Workers and clients will automatically retry in the event
of connection loss or failure, and some brokers support
HA in the way of *Master/Master* or *Master/Slave* replication.

- **Fast**

A single Celery process can process millions of tasks a minute,
with sub-millisecond round-trip latency (using RabbitMQ,
py-librabbitmq, and optimized settings).

- **Flexible**

Almost every part of *Celery* can be extended or used on its own:
custom pool implementations, serializers, compression schemes, logging,
schedulers, consumers, producers, autoscalers, broker transports and much more.
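
As a follow-up to the ``hello`` application above, here is a sketch of a
client invoking the task asynchronously (it assumes the task lives in a
hypothetical ``tasks`` module and that the broker configured above is
reachable)::

    # Client-side sketch: enqueue the hello task defined earlier.
    from tasks import hello   # hypothetical module holding the app and task

    result = hello.delay()          # returns an AsyncResult immediately
    print(result.get(timeout=10))   # 'hello world' (requires a result backend)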

It supports...
==============

- **Message Transports**

- RabbitMQ_, Redis_,
- MongoDB_ (experimental), Amazon SQS (experimental),
- CouchDB_ (experimental), SQLAlchemy_ (experimental),
- Django ORM (experimental), `IronMQ`_
- and more...

- **Concurrency**

- Prefork, Eventlet_, gevent_, threads/single threaded

- **Result Stores**

- AMQP, Redis
- memcached, MongoDB
- SQLAlchemy, Django ORM
- Apache Cassandra, IronCache

- **Serialization**

- *pickle*, *json*, *yaml*, *msgpack*.
- *zlib*, *bzip2* compression.
- Cryptographic message signing.

.. _`Eventlet`: http://eventlet.net/
.. _`gevent`: http://gevent.org/

.. _RabbitMQ: http://rabbitmq.com
.. _Redis: http://redis.io
.. _MongoDB: http://mongodb.org
.. _Beanstalk: http://kr.github.com/beanstalkd
.. _CouchDB: http://couchdb.apache.org
.. _SQLAlchemy: http://sqlalchemy.org
.. _`IronMQ`: http://iron.io

Framework Integration
=====================

Celery is easy to integrate with web frameworks, some of which even have
integration packages:

+--------------------+------------------------+
| `Django`_ | not needed |
+--------------------+------------------------+
| `Pyramid`_ | `pyramid_celery`_ |
+--------------------+------------------------+
| `Pylons`_ | `celery-pylons`_ |
+--------------------+------------------------+
| `Flask`_ | not needed |
+--------------------+------------------------+
| `web2py`_ | `web2py-celery`_ |
+--------------------+------------------------+
| `Tornado`_ | `tornado-celery`_ |
+--------------------+------------------------+

The integration packages are not strictly necessary, but they can make
development easier, and sometimes they add important hooks like closing
database connections at ``fork``.

.. _`Django`: http://djangoproject.com/
.. _`Pylons`: http://pylonsproject.org/
.. _`Flask`: http://flask.pocoo.org/
.. _`web2py`: http://web2py.com/
.. _`Bottle`: http://bottlepy.org/
.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
.. _`django-celery`: http://pypi.python.org/pypi/django-celery
.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
.. _`web2py-celery`: http://code.google.com/p/web2py-celery/
.. _`Tornado`: http://www.tornadoweb.org/
.. _`tornado-celery`: http://github.com/mher/tornado-celery/

.. _celery-documentation:

Documentation
=============

The `latest documentation`_ with user guides, tutorials and API reference
is hosted at Read The Docs.

.. _`latest documentation`: http://docs.celeryproject.org/en/latest/

.. _celery-installation:

Installation
============

You can install Celery either via the Python Package Index (PyPI)
or from source.

To install using `pip`::

$ pip install -U Celery

To install using `easy_install`::

$ easy_install -U Celery

.. _bundles:

Bundles
-------

Celery also defines a group of bundles that can be used
to install Celery and the dependencies for a given feature.

You can specify these in your requirements or on the ``pip`` command-line
by using brackets. Multiple bundles can be specified by separating them by
commas.
::

$ pip install "celery[librabbitmq]"

$ pip install "celery[librabbitmq,redis,auth,msgpack]"

The following bundles are available:

Serializers
~~~~~~~~~~~

:celery[auth]:
for using the auth serializer.

:celery[msgpack]:
for using the msgpack serializer.

:celery[yaml]:
for using the yaml serializer.

Concurrency
~~~~~~~~~~~

:celery[eventlet]:
for using the eventlet pool.

:celery[gevent]:
for using the gevent pool.

:celery[threads]:
for using the thread pool.

Transports and Backends
~~~~~~~~~~~~~~~~~~~~~~~

:celery[librabbitmq]:
for using the librabbitmq C library.

:celery[redis]:
for using Redis as a message transport or as a result backend.

:celery[mongodb]:
for using MongoDB as a message transport (*experimental*),
or as a result backend (*supported*).

:celery[sqs]:
for using Amazon SQS as a message transport (*experimental*).

:celery[memcache]:
for using memcached as a result backend.

:celery[cassandra]:
for using Apache Cassandra as a result backend.

:celery[couchdb]:
for using CouchDB as a message transport (*experimental*).

:celery[couchbase]:
for using CouchBase as a result backend.

:celery[beanstalk]:
for using Beanstalk as a message transport (*experimental*).

:celery[zookeeper]:
for using Zookeeper as a message transport.

:celery[zeromq]:
for using ZeroMQ as a message transport (*experimental*).

:celery[sqlalchemy]:
for using SQLAlchemy as a message transport (*experimental*),
or as a result backend (*supported*).

:celery[pyro]:
for using the Pyro4 message transport (*experimental*).

:celery[slmq]:
for using the SoftLayer Message Queue transport (*experimental*).

.. _celery-installing-from-source:

Downloading and installing from source
--------------------------------------

Download the latest version of Celery from
http://pypi.python.org/pypi/celery/

You can install it by doing the following::

$ tar xvfz celery-0.0.0.tar.gz
$ cd celery-0.0.0
$ python setup.py build
# python setup.py install

The last command must be executed as a privileged user if
you are not currently using a virtualenv.

.. _celery-installing-from-git:

Using the development version
-----------------------------

With pip
~~~~~~~~

The Celery development version also requires the development
versions of ``kombu``, ``amqp`` and ``billiard``.

You can install the latest snapshot of these using the following
pip commands::

$ pip install https://github.com/celery/celery/zipball/master#egg=celery
$ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
$ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
$ pip install https://github.com/celery/kombu/zipball/master#egg=kombu

With git
~~~~~~~~

Please see the Contributing section.

.. _getting-help:

Getting Help
============

.. _mailing-list:

Mailing list
------------

For discussions about the usage, development, and future of celery,
please join the `celery-users`_ mailing list.

.. _`celery-users`: http://groups.google.com/group/celery-users/

.. _irc-channel:

IRC
---

Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_
network.

.. _`Freenode`: http://freenode.net

.. _bug-tracker:

Bug tracker
===========

If you have any suggestions, bug reports or annoyances please report them
to our issue tracker at http://github.com/celery/celery/issues/

.. _wiki:

Wiki
====

http://wiki.github.com/celery/celery/

.. _contributing-short:

Contributing
============

Development of `celery` happens at Github: http://github.com/celery/celery

You are highly encouraged to participate in the development
of `celery`. If you don't like Github (for some reason) you're welcome
to send regular patches.

Be sure to also read the `Contributing to Celery`_ section in the
documentation.

.. _`Contributing to Celery`:
http://docs.celeryproject.org/en/master/contributing.html

.. _license:

License
=======

This software is licensed under the `New BSD License`. See the ``LICENSE``
file in the top distribution directory for the full license text.

.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround


.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png
:alt: Bitdeli badge
:target: https://bitdeli.com/free



thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/top_level.txt → thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER View File


+ 500
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA View File

@@ -0,0 +1,500 @@
Metadata-Version: 2.0
Name: celery
Version: 3.1.26.post2
Summary: Distributed Task Queue
Home-page: http://celeryproject.org
Author: Ask Solem
Author-email: ask@celeryproject.org
License: BSD
Description-Content-Type: UNKNOWN
Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: BSD License
Classifier: Topic :: System :: Distributed Computing
Classifier: Topic :: Software Development :: Object Brokering
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: Python :: Implementation :: Jython
Classifier: Operating System :: OS Independent
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: MacOS :: MacOS X
Requires-Dist: pytz (>dev)
Requires-Dist: billiard (<3.4,>=3.3.0.23)
Requires-Dist: kombu (<3.1,>=3.0.37)
Provides-Extra: auth
Requires-Dist: pyOpenSSL; extra == 'auth'
Provides-Extra: beanstalk
Requires-Dist: beanstalkc; extra == 'beanstalk'
Provides-Extra: cassandra
Requires-Dist: pycassa; extra == 'cassandra'
Provides-Extra: couchbase
Requires-Dist: couchbase; extra == 'couchbase'
Provides-Extra: couchdb
Requires-Dist: couchdb; extra == 'couchdb'
Provides-Extra: eventlet
Requires-Dist: eventlet; extra == 'eventlet'
Provides-Extra: gevent
Requires-Dist: gevent; extra == 'gevent'
Provides-Extra: librabbitmq
Requires-Dist: librabbitmq (>=1.6.1); extra == 'librabbitmq'
Provides-Extra: memcache
Requires-Dist: pylibmc; extra == 'memcache'
Provides-Extra: mongodb
Requires-Dist: pymongo (>=2.6.2); extra == 'mongodb'
Provides-Extra: msgpack
Requires-Dist: msgpack-python (>=0.3.0); extra == 'msgpack'
Provides-Extra: pyro
Requires-Dist: pyro4; extra == 'pyro'
Provides-Extra: redis
Requires-Dist: redis (>=2.8.0); extra == 'redis'
Provides-Extra: slmq
Requires-Dist: softlayer-messaging (>=1.0.3); extra == 'slmq'
Provides-Extra: sqlalchemy
Requires-Dist: sqlalchemy; extra == 'sqlalchemy'
Provides-Extra: sqs
Requires-Dist: boto (>=2.13.3); extra == 'sqs'
Provides-Extra: threads
Requires-Dist: threadpool; extra == 'threads'
Provides-Extra: yaml
Requires-Dist: PyYAML (>=3.10); extra == 'yaml'
Provides-Extra: zeromq
Requires-Dist: pyzmq (>=13.1.0); extra == 'zeromq'
Provides-Extra: zookeeper
Requires-Dist: kazoo (>=1.3.1); extra == 'zookeeper'

=================================
celery - Distributed Task Queue
=================================

.. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png

:Version: 3.1.26 (Cipater)
:Web: http://celeryproject.org/
:Download: http://pypi.python.org/pypi/celery/
:Source: http://github.com/celery/celery/
:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis,
python, webhooks, queue, distributed

--

What is a Task Queue?
=====================

Task queues are used as a mechanism to distribute work across threads or
machines.

A task queue's input is a unit of work, called a task, dedicated worker
processes then constantly monitor the queue for new work to perform.

Celery communicates via messages, usually using a broker
to mediate between clients and workers. To initiate a task a client puts a
message on the queue, the broker then delivers the message to a worker.

A Celery system can consist of multiple workers and brokers, giving way
to high availability and horizontal scaling.

Celery is a library written in Python, but the protocol can be implemented in
any language. So far there's RCelery_ for the Ruby programming language, and a
`PHP client`, but language interoperability can also be achieved
by using webhooks.

.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/
.. _`PHP client`: https://github.com/gjedeer/celery-php
.. _`using webhooks`:
http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html

What do I need?
===============

Celery version 3.0 runs on,

- Python (2.5, 2.6, 2.7, 3.2, 3.3)
- PyPy (1.8, 1.9)
- Jython (2.5, 2.7).

This is the last version to support Python 2.5,
and from Celery 3.1, Python 2.6 or later is required.
The last version to support Python 2.4 was Celery series 2.2.

*Celery* is usually used with a message broker to send and receive messages.
The RabbitMQ, Redis transports are feature complete,
but there's also experimental support for a myriad of other solutions, including
using SQLite for local development.

*Celery* can run on a single machine, on multiple machines, or even
across datacenters.

Get Started
===========

If this is the first time you're trying to use Celery, or you are
new to Celery 3.0 coming from previous versions then you should read our
getting started tutorials:

- `First steps with Celery`_

Tutorial teaching you the bare minimum needed to get started with Celery.

- `Next steps`_

A more complete overview, showing more features.

.. _`First steps with Celery`:
http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html

.. _`Next steps`:
http://docs.celeryproject.org/en/latest/getting-started/next-steps.html

Celery is...
============

- **Simple**

Celery is easy to use and maintain, and does *not need configuration files*.

It has an active, friendly community you can talk to for support,
including a `mailing-list`_ and and an IRC channel.

Here's one of the simplest applications you can make::

from celery import Celery

app = Celery('hello', broker='amqp://guest@localhost//')

@app.task
def hello():
return 'hello world'

- **Highly Available**

Workers and clients will automatically retry in the event
of connection loss or failure, and some brokers support
HA in way of *Master/Master* or *Master/Slave* replication.

- **Fast**

A single Celery process can process millions of tasks a minute,
with sub-millisecond round-trip latency (using RabbitMQ,
py-librabbitmq, and optimized settings).

- **Flexible**

Almost every part of *Celery* can be extended or used on its own,
Custom pool implementations, serializers, compression schemes, logging,
schedulers, consumers, producers, autoscalers, broker transports and much more.

It supports...
==============

- **Message Transports**

- RabbitMQ_, Redis_,
- MongoDB_ (experimental), Amazon SQS (experimental),
- CouchDB_ (experimental), SQLAlchemy_ (experimental),
- Django ORM (experimental), `IronMQ`_
- and more...

- **Concurrency**

- Prefork, Eventlet_, gevent_, threads/single threaded

- **Result Stores**

- AMQP, Redis
- memcached, MongoDB
- SQLAlchemy, Django ORM
- Apache Cassandra, IronCache

- **Serialization**

- *pickle*, *json*, *yaml*, *msgpack*.
- *zlib*, *bzip2* compression.
- Cryptographic message signing.

.. _`Eventlet`: http://eventlet.net/
.. _`gevent`: http://gevent.org/

.. _RabbitMQ: http://rabbitmq.com
.. _Redis: http://redis.io
.. _MongoDB: http://mongodb.org
.. _Beanstalk: http://kr.github.com/beanstalkd
.. _CouchDB: http://couchdb.apache.org
.. _SQLAlchemy: http://sqlalchemy.org
.. _`IronMQ`: http://iron.io

Framework Integration
=====================

Celery is easy to integrate with web frameworks, some of which even have
integration packages:

+--------------------+------------------------+
| `Django`_ | not needed |
+--------------------+------------------------+
| `Pyramid`_ | `pyramid_celery`_ |
+--------------------+------------------------+
| `Pylons`_ | `celery-pylons`_ |
+--------------------+------------------------+
| `Flask`_ | not needed |
+--------------------+------------------------+
| `web2py`_ | `web2py-celery`_ |
+--------------------+------------------------+
| `Tornado`_ | `tornado-celery`_ |
+--------------------+------------------------+

The integration packages are not strictly necessary, but they can make
development easier, and sometimes they add important hooks like closing
database connections at ``fork``.

.. _`Django`: http://djangoproject.com/
.. _`Pylons`: http://pylonsproject.org/
.. _`Flask`: http://flask.pocoo.org/
.. _`web2py`: http://web2py.com/
.. _`Bottle`: http://bottlepy.org/
.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html
.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/
.. _`django-celery`: http://pypi.python.org/pypi/django-celery
.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons
.. _`web2py-celery`: http://code.google.com/p/web2py-celery/
.. _`Tornado`: http://www.tornadoweb.org/
.. _`tornado-celery`: http://github.com/mher/tornado-celery/

.. _celery-documentation:

Documentation
=============

The `latest documentation`_ with user guides, tutorials and API reference
is hosted at Read The Docs.

.. _`latest documentation`: http://docs.celeryproject.org/en/latest/

.. _celery-installation:

Installation
============

You can install Celery either via the Python Package Index (PyPI)
or from source.

To install using `pip`,::

$ pip install -U Celery

To install using `easy_install`,::

$ easy_install -U Celery

.. _bundles:

Bundles
-------

Celery also defines a group of bundles that can be used
to install Celery and the dependencies for a given feature.

You can specify these in your requirements or on the ``pip`` comand-line
by using brackets. Multiple bundles can be specified by separating them by
commas.
::

$ pip install "celery[librabbitmq]"

$ pip install "celery[librabbitmq,redis,auth,msgpack]"

The following bundles are available:

Serializers
~~~~~~~~~~~

:celery[auth]:
for using the auth serializer.

:celery[msgpack]:
for using the msgpack serializer.

:celery[yaml]:
for using the yaml serializer.

Concurrency
~~~~~~~~~~~

:celery[eventlet]:
for using the eventlet pool.

:celery[gevent]:
for using the gevent pool.

:celery[threads]:
for using the thread pool.

Transports and Backends
~~~~~~~~~~~~~~~~~~~~~~~

:celery[librabbitmq]:
for using the librabbitmq C library.

:celery[redis]:
for using Redis as a message transport or as a result backend.

:celery[mongodb]:
for using MongoDB as a message transport (*experimental*),
or as a result backend (*supported*).

:celery[sqs]:
for using Amazon SQS as a message transport (*experimental*).

:celery[memcache]:
for using memcached as a result backend.

:celery[cassandra]:
for using Apache Cassandra as a result backend.

:celery[couchdb]:
for using CouchDB as a message transport (*experimental*).

:celery[couchbase]:
for using CouchBase as a result backend.

:celery[beanstalk]:
for using Beanstalk as a message transport (*experimental*).

:celery[zookeeper]:
for using Zookeeper as a message transport.

:celery[zeromq]:
for using ZeroMQ as a message transport (*experimental*).

:celery[sqlalchemy]:
for using SQLAlchemy as a message transport (*experimental*),
or as a result backend (*supported*).

:celery[pyro]:
for using the Pyro4 message transport (*experimental*).

:celery[slmq]:
for using the SoftLayer Message Queue transport (*experimental*).

.. _celery-installing-from-source:

Downloading and installing from source
--------------------------------------

Download the latest version of Celery from
http://pypi.python.org/pypi/celery/

You can install it by doing the following::

    $ tar xvfz celery-0.0.0.tar.gz
    $ cd celery-0.0.0
    $ python setup.py build
    # python setup.py install

The last command must be executed as a privileged user if
you are not currently using a virtualenv.
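A throwaway virtualenv avoids the privileged step entirely; the
environment name below is arbitrary::

    $ virtualenv celeryenv
    $ . celeryenv/bin/activate
    (celeryenv) $ python setup.py install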

.. _celery-installing-from-git:

Using the development version
-----------------------------

With pip
~~~~~~~~

The Celery development version also requires the development
versions of ``kombu``, ``amqp``, and ``billiard``.

You can install the latest snapshot of these using the following
pip commands::

    $ pip install https://github.com/celery/celery/zipball/master#egg=celery
    $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard
    $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp
    $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu

With git
~~~~~~~~

Please see the Contributing section.

.. _getting-help:

Getting Help
============

.. _mailing-list:

Mailing list
------------

For discussions about the usage, development, and future of Celery,
please join the `celery-users`_ mailing list.

.. _`celery-users`: http://groups.google.com/group/celery-users/

.. _irc-channel:

IRC
---

Come chat with us on IRC. The **#celery** channel is located on the
`Freenode`_ network.

.. _`Freenode`: http://freenode.net

.. _bug-tracker:

Bug tracker
===========

If you have any suggestions, bug reports, or annoyances, please report
them to our issue tracker at http://github.com/celery/celery/issues/

.. _wiki:

Wiki
====

http://wiki.github.com/celery/celery/

.. _contributing-short:

Contributing
============

Development of `celery` happens at GitHub: http://github.com/celery/celery

You are highly encouraged to participate in the development
of `celery`. If you don't like GitHub (for some reason) you're welcome
to send regular patches.

Be sure to also read the `Contributing to Celery`_ section in the
documentation.

.. _`Contributing to Celery`:
    http://docs.celeryproject.org/en/master/contributing.html

.. _license:

License
=======

This software is licensed under the `New BSD License`. See the ``LICENSE``
file in the top distribution directory for the full license text.

.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround


.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png
    :alt: Bitdeli badge
    :target: https://bitdeli.com/free



+ 496
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD

@@ -0,0 +1,496 @@
celery/__init__.py,sha256=3CpQmXwUsO3qBXRvUbCUgeb95Hs76iUyti10oevsJWw,5727
celery/__main__.py,sha256=Zdv8wB4CbSvtgrGUVIZyFkQcHxFS7z3RRijGi4uQMN4,983
celery/_state.py,sha256=TU-oQvKpbZtrYpU6iF7OJsekP897J_qRR0Y62Y4LSy8,3921
celery/beat.py,sha256=kcwCMfxcS7Jvd2p7dMmK0J4NO79-OlVQbJJokJWDcHI,19009
celery/bootsteps.py,sha256=ASlSzf2DFfYfOvtud-p-m_zo7K3f5IKspzTAzjlfNc8,12382
celery/canvas.py,sha256=b5WZZqdHuI2bhVbroMY-K2VU_XXBY0m5hkxfy3-KNFY,22501
celery/datastructures.py,sha256=i0evKEjw8-OPZyT77Fjr7q-nrVKPKyk3IbQ94jJzoOk,18647
celery/exceptions.py,sha256=8SJV-PofoiyOwbSzb8CueVeKlBDTJDHkkgPQE1Suu-w,4526
celery/five.py,sha256=a-4lbbb-qHnEm0kh7BjENwWIuft-R4WzIC2htemnIsY,11695
celery/local.py,sha256=vXD1d-QliYsrKAJgsIj0ZNG1KEXHcoB2Ty1JEOWH_Yg,10818
celery/platforms.py,sha256=0W1WSk8b3AQ6oNhtM5JEgN27DHoXZzzSEJ3nvjwuBs0,24774
celery/result.py,sha256=kzlMWbWxY_rfI90RsmrV2LB8c7X2iJDaYcOh5esAhy8,28701
celery/schedules.py,sha256=XrWguXKa8-umIbG805tvzPmUbM6B2d41SKqr86CYUy8,21787
celery/signals.py,sha256=zuwvWGAyIQLL4F0p83wRSbjBVdnQDnEsiCC3H3_3BAc,2929
celery/states.py,sha256=qZ880RMvRcspPb87u13wlfiP0ilQh_-Ap_I8-l0PM6w,3430
celery/app/__init__.py,sha256=Pg6NKoOd4tbajaxrIpMcYqV_gbguCnWGbUavNUJtRVg,4380
celery/app/amqp.py,sha256=MCAmCd20hXGAO0ilV78BUUPDNxRpE5gLD7vKIodQswk,19101
celery/app/annotations.py,sha256=mwfXit7ZByMykH0Su7KutgEXC2DxN0MIVKwioXtiqPU,1514
celery/app/base.py,sha256=knLzZffbOmaC3LAF-zXDzNeVzuOnzr28o_y7EZ7_mFs,24196
celery/app/builtins.py,sha256=Dmvkm-aeMosvGdFbNGXua5otk81Qjwh5vEIGjlZjPDg,14180
celery/app/control.py,sha256=7CrvxyZE-fIW0gSDvfUSbaZN5nGd7AWFSUlKKC5AXoI,11023
celery/app/defaults.py,sha256=4wUvjXss3CoJvdF5B271hot1rquOn26nXHvZ2dbQHaw,11238
celery/app/log.py,sha256=LzKSBh61d_ZK_yCW5zfR4dOmzSOs6a4cjyAFg75cZu0,9065
celery/app/registry.py,sha256=pJLgSmSyNtn-q-GESpQQSr2TFzh8yQvPuDHD7XzwxU8,1748
celery/app/routes.py,sha256=YzooevUbmGNrrAHGR0AwFxtuKWL2xP6g14l99dssaFI,2967
celery/app/task.py,sha256=TclL59Gs79Sn7h5HVdHOdQtxDU3AfgQJKB7PZz5RzZY,35574
celery/app/trace.py,sha256=lmdPyBwFKSxkfTjVPOKaTD6Rnnhs1FIHdOhcbcVmhaQ,16717
celery/app/utils.py,sha256=oR28DoRzVVMaSFOMZ47JFGvFAP3aTtPEEH7B1LsmFAs,8367
celery/apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/apps/beat.py,sha256=Yu31IM0cKqM5FWt1motBjRBAYvpIsODrPRDAp1J_IYI,5189
celery/apps/worker.py,sha256=c8mxAhCSpG5K9snPXHwpDnCCOggVMlKnH4sS2Dq8SO8,12555
celery/backends/__init__.py,sha256=2DzVIyTm-lq5W6ElqMAK9AiJxCynp6E-bF7gPoFgfAk,2206
celery/backends/amqp.py,sha256=p1coJ96bJR-V__RztU58zzlilclnFqK1Mkp1NYnf44E,11622
celery/backends/base.py,sha256=pBDi5K-SO7bWRB-gXNcDky5ADO0xwJazfOkRFvsMuFc,22572
celery/backends/cache.py,sha256=DErN0OboNLQRmL-_E6wEbBmxylZPCUJOfyydDryW5wE,4635
celery/backends/cassandra.py,sha256=UL4qeFYa5qUC0E7oJRmci2JhDp5z7d_OPNsJnkw-B6M,7219
celery/backends/couchbase.py,sha256=F_qczQDDBmOmViFP8M0RZ0NXPlCWxFovqqGoB4WYghk,3382
celery/backends/mongodb.py,sha256=Ke9zj5vhmTnVAHplOhiec8B5D62_ty30PDZEF_8LFck,8688
celery/backends/redis.py,sha256=gBz8DhREI1rKMFwQ9behNVQP8qrWJoBwU3HAi9C4tXU,10566
celery/backends/rpc.py,sha256=Qllbxw0T-rt6mgRxmNnZUlFgvpSgOiQOdBAU6mjlcGY,1790
celery/backends/database/__init__.py,sha256=gCwDd2xkJ95jxwGWcIz9IIst1aryaGJ4NjufR7xPmmo,6568
celery/backends/database/models.py,sha256=k_WXPzVk9BCGm63ne4nhQO5cDpA-WJ4afaoCtdk7dLE,2261
celery/backends/database/session.py,sha256=tGJAnVNXOj-LW_z8Blh9u8aZ8j01M0aOLHomOrkNmvE,1840
celery/bin/__init__.py,sha256=YULxAVdpSTcKce56Bt_l9rXSho8pqpcp082NwnkTRHs,87
celery/bin/amqp.py,sha256=WoQCn_sg9Vbj7Bryd-sUNxNePtsl91c5_Oi3z1W0_Jk,11651
celery/bin/base.py,sha256=saxceFnADwkNVLySAqgSaBu1W9LKfD2rfP6co_wtcBQ,21336
celery/bin/beat.py,sha256=abMzN3d3Zu8VBKAeSiZuG1_P1loqTsu7TZWdkXt1ugM,2638
celery/bin/celery.py,sha256=4BfRWimQltbDzUqIKmq_OSm2X4DYhwUgc0ypyDabLig,29485
celery/bin/celeryd_detach.py,sha256=oWGoWfOgaSTi4hb-EpAKHWUPA1gXG0sjlMp6pz4PPuA,6026
celery/bin/events.py,sha256=cSFvfzN5OHNdva0Yuzz5HNM1jhZZXtcaqdL0exVI578,4052
celery/bin/graph.py,sha256=JycXaXGTtIyxCy96ph1Zk8FQ_3wk-9fhCDueH4hWneo,6420
celery/bin/multi.py,sha256=owyqxdQROMFAJUMt-L5BFc8DQveSKftDHcZDlRjs_Sc,21265
celery/bin/worker.py,sha256=P78klQzKKb872rCEXWj5MGUITA7ZN5pxiy559zjd5aU,9014
celery/concurrency/__init__.py,sha256=t_AgXnznrRCoiAV_7ClDUzhwwu39rKIlpjr0vF7hbDg,820
celery/concurrency/asynpool.py,sha256=MoEzDfw-po8p_kEUwjRRAATpuUoJ8hUM-BhbFHVKo0w,47804
celery/concurrency/base.py,sha256=G_AOmxS6wsAMQ8BPcZWK2AoT4y30Sm76TtkZdGgdlrg,4203
celery/concurrency/eventlet.py,sha256=c2R3K9Trpow43SkvnfFzkR65gbihJhIBTCaQod1LD7E,4287
celery/concurrency/gevent.py,sha256=KjdibnAD5YfVDh1WHRKIoYKLCdGHp31WOOxXPy9UyMw,3509
celery/concurrency/prefork.py,sha256=udTgaF-QycG4ZiDpZo_QhtjCuxcM1CUKUk4dhlXQMOU,5755
celery/concurrency/solo.py,sha256=zi0qLzLJjO8ApdUokc-5UimsJyQFhD-_acSn8Cwgclc,696
celery/concurrency/threads.py,sha256=N41qfLMLyWqv1cClfAm3ICATWJmC8DqfF3jReRkjgt8,1767
celery/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/contrib/abortable.py,sha256=bnOC_4lmXSrtGkvSFndEdWiJNyscynLrcpGKnr1NhcM,5094
celery/contrib/batches.py,sha256=1GaaJw77TSjslI3B_iYleRrM-EPBDCNstmcahC8ER7U,7152
celery/contrib/methods.py,sha256=PVmZu0PQ1rrAKzb4GzuyEPCYPUgyuFasjMpUFhEOJzU,2613
celery/contrib/migrate.py,sha256=rMbY-7sn7sgmwkpqDleFCBUg1qR1weSi3DDmIYbss-c,11911
celery/contrib/rdb.py,sha256=sH69j4_YBBwE9TPlqykaAlf11AN7a7r5_J3Yf5oqAeQ,4986
celery/contrib/sphinx.py,sha256=SZd8CT67_MkcFrPUuiqDbjRF2B1QKEMO0H_ZnQcOTAQ,2019
celery/events/__init__.py,sha256=HVSYE0r5JKMwtBbmeas_nM0LZM5wCBSPhR5lQ7GpYiI,14166
celery/events/cursesmon.py,sha256=4sUQ8eulZwoaIRxSOwxVON86QknY2RO9Sf8dvtzyTZs,18311
celery/events/dumper.py,sha256=LXvJDwjkO1mnny35r5xChiQinu3pDk5mJRK41PgPMnA,3285
celery/events/snapshot.py,sha256=gkM6AkWp5Jv49gurjDDeI-NFa5FUWzwZ0A2ALiuV0EI,3268
celery/events/state.py,sha256=5Qffr6Abj7ASxtV4-p_60PcHSVVneToW0e2Scgx6z5Q,23275
celery/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/fixups/django.py,sha256=fyPyX9OUnWhAuk-TWm--0XaeY3dNsdBOrpIvcTvvZWE,9093
celery/loaders/__init__.py,sha256=qpF2RdoBeubV_teLZ2yKoRq8sj4aqLjRBoSCgyte47Y,1015
celery/loaders/app.py,sha256=fFFD4SVrECpzM60DZVcnLjDtYhr1tf4ABPtkD3H1MbE,269
celery/loaders/base.py,sha256=mclr01KtYoD0oXtYSg6erKYw8Bb27u0LJrBrD4hCFQk,9303
celery/loaders/default.py,sha256=KH0Y2iA486qelyzoewv47SynpYJIofW2vbdFTcmGYbE,1705
celery/security/__init__.py,sha256=KbfxRiy_FHJbYqVsedV7MlAPsThJdwxhjV5F3IjgQAU,1923
celery/security/certificate.py,sha256=Mc925ch6wLe2sYXmBsRm7rmr2FXclq1wopEdVpRc6kc,2746
celery/security/key.py,sha256=rBdjSYIgTHhqrSN2YUmqOU3xn56vamLigpZTtvSQqDI,679
celery/security/serialization.py,sha256=D9iZNvuxA-SQXolHWOyGRnNPwCNnEqFbjayhf9vQ3E8,4011
celery/security/utils.py,sha256=mI12UmxFkxzNCdWsrv71N6r9qNHGZwy9weSl_HaCNP0,759
celery/task/__init__.py,sha256=d0iH36VG3zOfCCv6KjvXrcO-eePFOryCLexFeUI8PLc,1743
celery/task/base.py,sha256=zkKUF640T8cf2ltk5b_6MOWYwNOYbjqshE9ofceihn0,5583
celery/task/http.py,sha256=qEu9tPSqSit-5L6MuOJY1EFutFim8JVGL9bked9uSFw,6849
celery/task/sets.py,sha256=GStbowg-IQW2Xu96qV6leMiYth3gQ9mQAcKy-3hNHkI,2769
celery/task/trace.py,sha256=unQgQJ3BjhhvPQnkBqJ-WsHj74_nvYmYSn_E1pyGcm4,323
celery/tests/__init__.py,sha256=G98w19Jt-55CrtCUpBzoE7ooUoDbBH_4OJmune3k0D4,2618
celery/tests/case.py,sha256=kWtIhEH582gUSNcvSAJeH37RvUuyEEy8svDzuT6ewMg,25267
celery/tests/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/app/test_amqp.py,sha256=yn8vLfD3lDdPjaTE9NGsNR3aQgcKJX3KNC0Uo8h3p3g,7778
celery/tests/app/test_annotations.py,sha256=guYRiOgF-jqzqTKdjAbC469_nKjxtdq-PxVJNbMMvow,1532
celery/tests/app/test_app.py,sha256=vlytaWTW7VcOhqIM4RPkcTRjpp7XtTwPjpEwo7AC3ns,23272
celery/tests/app/test_beat.py,sha256=zoHiwseH7Vw5YOcerhDMpVKog9QgIPXa7POdMTeb6JM,17387
celery/tests/app/test_builtins.py,sha256=OxqNpLV9Z6KFrtxokJ8VHVuec-dA40nLCtMVH22pwBw,6575
celery/tests/app/test_celery.py,sha256=Q4XTxX96kX-IUZMw95Q96AmVeeE1L00_2bfTOORodJg,535
celery/tests/app/test_control.py,sha256=IcbpqPMVaOsL-9vaskBq8Hx2V7_09CYC5Y8kuobX538,7022
celery/tests/app/test_defaults.py,sha256=gDxD5htqT_cFeUruz8neLLj-V1ffys5nb7u7138VlKQ,1815
celery/tests/app/test_exceptions.py,sha256=co-o7xbNKNBAIsIW5E4x5dQntv-HK-72e1PnqsOR3Ag,849
celery/tests/app/test_loaders.py,sha256=h5c_QJcsmoD56Uwhsi4cACK3w4cP1dnd3d-8-rOUtC0,9487
celery/tests/app/test_log.py,sha256=nW_uMGURkHnEs-vEGg-ciTYQmXPoQXcfAvfSe7jPZpY,12745
celery/tests/app/test_registry.py,sha256=Kw6BIkMuJMt-XRMLnVr1Dce3MLZeO4J5-abCEwGf5NM,2512
celery/tests/app/test_routes.py,sha256=ZuoWarzltzzRx58cB8dam8i1qkZKf00A2IpkBxfCWkQ,5354
celery/tests/app/test_schedules.py,sha256=KxjiGMXjuzGr0IZsb-Bph2AhUPeNAKNhBBajBSZ7XNo,28559
celery/tests/app/test_utils.py,sha256=10EAWo_5AyYYddROKuSiylZixzChcqdUg06Wev2PIqw,1309
celery/tests/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/backends/test_amqp.py,sha256=j6HCUJv4JAn-UGjx9lwW-ZbrcGFzkXXPxtW--CeaGDY,14161
celery/tests/backends/test_backends.py,sha256=DYm8tSsuUHSz1Gxnm1yBvNa1dHBbXn-WVrARWOoN6Vw,1535
celery/tests/backends/test_base.py,sha256=vt2vdWekD0bEPT-L-ovdxit5RWbBn3RDdRMmjPBOglc,16071
celery/tests/backends/test_cache.py,sha256=32keeBhHGLqlDDHzqviHwbAewuRpQPrPTnhv_6aW4fM,10280
celery/tests/backends/test_cassandra.py,sha256=HOtGEfL82sUXBNOIr0D3z3fINmeeZH-mBDnOD83B93s,6412
celery/tests/backends/test_couchbase.py,sha256=9Wu1cQ3UKUCV-yrrufeqpAQVvqyeMV1VjGFHXeQxAq0,4782
celery/tests/backends/test_database.py,sha256=NlN4WTret69GSJrSJBGEU9IDFg1UdFEwpBQoJaI6FSk,6198
celery/tests/backends/test_mongodb.py,sha256=xGbVOXl7Jfzpi1nYoVAw3RGRH-l89HYbejMS04-i8SM,14247
celery/tests/backends/test_redis.py,sha256=uVPsHdOU14GSPZPLA6SY2JUUo79GltfUFVy1Olfc7fM,8655
celery/tests/backends/test_rpc.py,sha256=iQBb0efYHvSSppUc6IaK2L-Jbr_Utk2iUpOoT8AzfYI,2317
celery/tests/bin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/bin/test_amqp.py,sha256=paYj2ZdtfeodT9zmrwL8Pbmk2aCUhkGnAgbnEmrQZ6k,4721
celery/tests/bin/test_base.py,sha256=8EXItbrOQT1L-bKP0fxjiwkkEjEMiApqBJrLw0xqbIc,11301
celery/tests/bin/test_beat.py,sha256=QvTecx2yqc-e0KrQjqAXB3aISc999IHc__I10s6yOJc,5464
celery/tests/bin/test_celery.py,sha256=CrMMqM3duzFMCt1xPHDf7GNpp7-9otCJFiN2R4HVI3U,18700
celery/tests/bin/test_celeryd_detach.py,sha256=TchgSUR8vDB8OqRF6VpnYMKktpGrgZIQLXJhShWLcpE,4000
celery/tests/bin/test_celeryevdump.py,sha256=1ImmCOndSESTVvARls0Wjngvd86NFp4WCF9r32OI8HI,2231
celery/tests/bin/test_events.py,sha256=HYPiQJcFumiSHwtMnXO8dcURW2eNknyTCoSwpOWhm1w,2435
celery/tests/bin/test_multi.py,sha256=MVGxbabKXDPgAmdME3K8zSmZ9bTjKkMviBCP0RHoum4,16477
celery/tests/bin/test_worker.py,sha256=9LJJrDjzRQzM7LAPbEF0sK5mxLj8Xpjso9chODgJiQs,23503
celery/tests/bin/proj/__init__.py,sha256=Q9qt46aWx0dx_SFfyigaH4goU1_ea01l7T4dhpDEeSs,104
celery/tests/bin/proj/app.py,sha256=ZpUV5lnfIiYBH1jMsap--ZQbX9YWk-zEO_2RTwI7lYE,102
celery/tests/compat_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/compat_modules/test_compat.py,sha256=q840-7jXVOMxpB5qS-5Pv99pZXPEeDMx15__SJVEHMI,2376
celery/tests/compat_modules/test_compat_utils.py,sha256=0GQ1cxCiK8k4qOzvanBSSYLawO1vFEdmJaDAPz0AfCQ,1457
celery/tests/compat_modules/test_decorators.py,sha256=KS7ghG-RYiibnK4JcGZX_r-d9RsRAhKitLXA72WzsGA,1066
celery/tests/compat_modules/test_http.py,sha256=q1IaC7oUY9CEPUQga8t6RoMGbQQxBCGC3gODskqW3LU,5008
celery/tests/compat_modules/test_messaging.py,sha256=XsQIR6vdtnfCpcPchGJUND1d6t6Mi7Cqjo0yJ3TY0zQ,357
celery/tests/compat_modules/test_sets.py,sha256=h5yzbwuLtVqQHeY7INq9nmERApnhwWs1EbrfP8Lbkh8,7630
celery/tests/concurrency/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/concurrency/test_concurrency.py,sha256=saYW1_SeBdRJTUwx_9wtNpZXslDJQCQsfcmoNS2BIZ4,3163
celery/tests/concurrency/test_eventlet.py,sha256=hWsEQlZbSqQoPfjBM8xDq7ZeRJ-UJePxj8xlrmJ96dQ,3303
celery/tests/concurrency/test_gevent.py,sha256=n8WCZO9JLTPOjVajRKPlaHI_qPRC6tr3DgVPO_3lZ20,4309
celery/tests/concurrency/test_pool.py,sha256=nKgYR3rHtsuqcxKSGqC_tMF2glqIiecDZMEGG1bYCK4,2326
celery/tests/concurrency/test_prefork.py,sha256=lSfo-sVt_f6rPjQNNV7hQ1wNGghM5SWwztO_ubcbx58,8490
celery/tests/concurrency/test_solo.py,sha256=sljYxvp-oY4wSHftFOwXR5jSDCBZvmu_AswysJfeDSg,531
celery/tests/concurrency/test_threads.py,sha256=8PkYbDDxdiPe3vWvKs3kQoEhPEV4MEVMoapeUQcooAY,1861
celery/tests/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/contrib/test_abortable.py,sha256=61ys7MX9IgVZ5KsYMZKLoaLxovRcMQL8kz7DK8GHEWg,1513
celery/tests/contrib/test_methods.py,sha256=_xxqh_QobP8kP_Y0YS-GvYGIFLp6L-aeL8qeSles4DQ,840
celery/tests/contrib/test_migrate.py,sha256=tHMo0uQ-htzmIv9WBC0-KdLZeLk-75CKqLX2uFLn46Y,11182
celery/tests/contrib/test_rdb.py,sha256=ubWjYB-0hzPXqVtAyeLw99a4DpdAGBmade9Fh70tKbU,3093
celery/tests/events/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/events/test_cursesmon.py,sha256=iK8iwm8MtIVUiiWKbzW4WrWdCVX3hBPb4yAwYIrWetM,2653
celery/tests/events/test_events.py,sha256=hKE-0cIMG8H1_91H9i2fB430J7ii-H2WzTS3q51cdis,8527
celery/tests/events/test_snapshot.py,sha256=WeTY_uUeKNVSTxVtvAO2xYmftYlwA8uivd2KsmeNWjk,3734
celery/tests/events/test_state.py,sha256=6exI3OaJ3eMCSYt1_gCgBTzYZ_6lVfm2SjSyVK09V90,18838
celery/tests/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/fixups/test_django.py,sha256=LMJEHFjXpS2AY9J9lM03vxh9QOay15HUWj1s7hEAGps,11892
celery/tests/functional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/functional/case.py,sha256=hJGE0oy6ABIuBc3osBPQid7KwaKefe8WvsuIrKQkkwg,5599
celery/tests/functional/tasks.py,sha256=nfDlTt_urjMpu-6ug4KWD5BieWFdxQbkhEVPepfEE_8,341
celery/tests/security/__init__.py,sha256=ivc_G0iCuqZ1bbKyEABXdcH6X_nXZLIq5MlYgCP6z-A,3623
celery/tests/security/case.py,sha256=YQ_4RTsCEkPxamivvitHvqsgbkStx-13ma00dwG2MMQ,403
celery/tests/security/test_certificate.py,sha256=IADR90BtZUo9wOTX_K6QIHFB3qMqALatGnWaB90cfBA,2633
celery/tests/security/test_key.py,sha256=xMmVbUbB4TzVUq8XZRS2jjuv6hu0AwUXrum-PLTIDqM,845
celery/tests/security/test_security.py,sha256=QR7KlWiukB0sFtjLVhJDFzQBBWwbMshbzG6jef_RPFI,3845
celery/tests/security/test_serialization.py,sha256=o0i-FO22l8kbJNSf8ajSg9cIE_oXH3QpECWfwA2bv1k,2252
celery/tests/slow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/tasks/test_canvas.py,sha256=Zu0iO8JxUajNzcOcpgo_kYoTplHS5eI1CgulBSypkaU,10664
celery/tests/tasks/test_chord.py,sha256=jHLjW-4QwCEkag7uwhnvTcFB3-gdbFpIm0dal_QQO8w,7007
celery/tests/tasks/test_context.py,sha256=o89z1fvYROuiIYM_HW3DpFaWz6y8-dIze2TSc2UmXoA,2546
celery/tests/tasks/test_result.py,sha256=aMOqbAaf6SgtrNBwIWbjDC7pDFcNy0nWzabQIiuHHuo,24135
celery/tests/tasks/test_states.py,sha256=z2OV113N4EPS33AZu3chN3XGEbPIrKmYa699gdIFHI4,1317
celery/tests/tasks/test_tasks.py,sha256=CIF1MhneGOIUvUelwcBD7j6hUSDevgBVEQd7i6ral5I,15806
celery/tests/tasks/test_trace.py,sha256=T8ZyKBfccSNTzmXc8_FyJURBO-kTaikijPLOBLDBVXU,6770
celery/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/utils/test_datastructures.py,sha256=b1nsrozGQTPMVgS5OaT6RrBQNOQgV5DDksadFIi97qc,10629
celery/tests/utils/test_dispatcher.py,sha256=sZMai1M6cufvaXUcDnD4lLVMUdWM6txOAYsvNq-EDqg,3873
celery/tests/utils/test_encoding.py,sha256=Lk5BF_Sr8QfcBndp5ldvzmVUwNBA1p_LjKo3t1rGk8c,526
celery/tests/utils/test_functional.py,sha256=riIDlFNhFfmGa8VH04EEhE2HCUtvlF-ID6epYjLeb5A,5074
celery/tests/utils/test_imports.py,sha256=rZ-Cjt1SYEvVO7SToxTk5bVmS0yW9Qnt754qX2PGeP0,1284
celery/tests/utils/test_local.py,sha256=zmP1lZbgmMgFauUeVtEr5maQXWguS6LUxDExXTzSrIk,9755
celery/tests/utils/test_mail.py,sha256=GJLoH4UAjxNWdFP-vBagjzGQnwuUvtRr45gSF8WXmLY,1594
celery/tests/utils/test_pickle.py,sha256=j1RuTZJLLw79cquX0rpVy-6BHULvF8Jf0iwF7jOPVVk,1572
celery/tests/utils/test_platforms.py,sha256=PYJPbu5xl22Ikit7h6Bik82xzDGxFQ8BhzmRWIyHcXU,23906
celery/tests/utils/test_saferef.py,sha256=sGvHI0iGfpN2p83SaDABRTrHuHNfg2fpFUlbWHpRNis,2050
celery/tests/utils/test_serialization.py,sha256=wiQPcEhVdNPpKqIIG0akHJ1HADDKGGTm45r5f36LzAQ,1129
celery/tests/utils/test_sysinfo.py,sha256=wJpb59DawWxJ1ol00RDV1ML_kS-3475amczYgtbnj6Q,909
celery/tests/utils/test_term.py,sha256=9UdtJKag7NOAaryRoTN_xzoE0SYcDGOdf4S9Dfh62Ww,2633
celery/tests/utils/test_text.py,sha256=0vomEwnuw0hbA-081xFZso1X8uQ0bx1sDx5lxBDWD3w,2179
celery/tests/utils/test_threads.py,sha256=RFIaXkJ0TdyXzoGAnHg9t7QhEIEMe44cSFrxYp-gDgA,2666
celery/tests/utils/test_timer2.py,sha256=z3mxGq3WcpTXe2mwlfHGMj_HkVsFu9YyDkrhA2Wo_s0,5099
celery/tests/utils/test_timeutils.py,sha256=u_8BEOt04m21JPCjm71nnbvFpEsIxGRQt6aDV_BPieM,8405
celery/tests/utils/test_utils.py,sha256=GKEN-d4kK0NxSdAn-nnN_WQlJGOqx4RR4tarRTX26ss,2812
celery/tests/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
celery/tests/worker/test_autoreload.py,sha256=5Vs727vOhs6WCMwYe4RMQcjaTIVO-hPFxXdD5Ka2a0M,9877
celery/tests/worker/test_autoscale.py,sha256=LNAObLqwy79pF0xLIWR36dk6VtL5Rq0uOT7oLQW3ZD4,5900
celery/tests/worker/test_bootsteps.py,sha256=8QEzvNayK7oCjCAaX005-fvn11PK4_VXtr7EkSaXaok,9132
celery/tests/worker/test_components.py,sha256=_0k_lYjst-zh5bwy-GlPMFgaGUsiZdeyu4ycUEnM8b0,920
celery/tests/worker/test_consumer.py,sha256=8B3WloJo3sY2pzCkMUPZHg7R5u8rNihaS1VGeleLajo,16490
celery/tests/worker/test_control.py,sha256=iY6BEvjy4jDk5sy7RTgpatz7ZzaJK-JrvF-EgiNrk1Q,21324
celery/tests/worker/test_heartbeat.py,sha256=AoLPVZdyBZO6-F3JPdMRPC2O1hAYszFIFDPR3-4L3C8,1678
celery/tests/worker/test_hub.py,sha256=iOPrKj-LN0Ts_OAhaljpjBq5XhYU_KtY51dZq8zGiIM,9735
celery/tests/worker/test_loops.py,sha256=DMC4xqBQhuRICNwUhPXngM6avUDootuY7LxtKhZ5SAE,14533
celery/tests/worker/test_request.py,sha256=KEEoQoGkUV81W9BmkOahMIepuJpTGvnsTreFAxrI1-g,31467
celery/tests/worker/test_revoke.py,sha256=v9ZEOEspe565G8eRAup17o5cXA2BDRiiwxpPgGRDNRo,336
celery/tests/worker/test_state.py,sha256=x7vtdk05Z44KQiwJOJTPHvebKMDCNy4ErY6_j4suFNs,4595
celery/tests/worker/test_strategy.py,sha256=NIMURR2DosEY21Jx0KBk3Rz4fpYcuLZ4doFpsUqzFjc,4624
celery/tests/worker/test_worker.py,sha256=9IcP8_WT4ujLSPL-v5MGp4fwUpUAjLHISJNBM77tzcs,38397
celery/utils/__init__.py,sha256=kkA4rLGtWwH9m8-kjDxh6pfgf0SGYO-yBag-vrsUEBs,12713
celery/utils/compat.py,sha256=oV2FXmhw_Yq7ub_RWl-XRZBJmd6xMpdrpaeIXvPgFt8,34
celery/utils/debug.py,sha256=GihMTBeKlKYs-0lr3f2TXq1lgBh4CC-VhZsO-zkCQ98,3751
celery/utils/encoding.py,sha256=yh10Ml0TsdibU3EGbd2lvDTpNvxtD6yN_2o8LI7sEno,361
celery/utils/functional.py,sha256=C9CsNmx_VyB3U2Zwc83eIkDAD50dJN6ayWDukUK9b60,8814
celery/utils/imports.py,sha256=oSzhVyyt9DZs2KtLqrkOOMwsOePPC_A6h7LeaZsoxJw,2914
celery/utils/iso8601.py,sha256=zA4OeMDxKGzNEV6aFOWAZzpix7i6VUJms1vabPyx0B8,2738
celery/utils/log.py,sha256=UYSFLqkxKNXpBbhfY9kZGn4jOVyKrfld-SmDiY2nYOQ,9292
celery/utils/mail.py,sha256=rnhrwfJXl5cP_KOtcPWypAhBihnm0Fa5U7Xep36QqZ0,4944
celery/utils/objects.py,sha256=grHN_y3LnktQPQI8eTw9vBwR6KcPBT-BRUL2VJHr6w4,2762
celery/utils/serialization.py,sha256=Wgo-K628_x1dJTeClG5TWJbKxxfiQrAkEUvE41nRX5s,4869
celery/utils/sysinfo.py,sha256=zlQAlqJgIt0SGG8AnIYvQRiy0yK9D2cC_RtmJpPz0Ac,993
celery/utils/term.py,sha256=zBgNYbw86wuLvmEHG18-wXycmgqNiPxQ8bNVWt5bpk4,3927
celery/utils/text.py,sha256=r5j7bXZr6gAnzr_TGfRT5Lp2OgHi6mPOu8lTmIq8_ss,2020
celery/utils/threads.py,sha256=Ef1d7pj1loMilftUPqtbGhcQe1NoHPFlbtMHsqd-u04,9636
celery/utils/timer2.py,sha256=zj3p0jH7lxpmWUAAaCS1EH6ubWp1m3vmyRWd8fCV6CA,4236
celery/utils/timeutils.py,sha256=VcSgnUv9SmBq7Pcf6YdumLDcSlSpQt1U-Higr-NG0IA,11193
celery/utils/dispatch/__init__.py,sha256=o1QuagJss6zaoNcLLotHHs94Eii7e4VNqu8j2Zj58y0,113
celery/utils/dispatch/saferef.py,sha256=E2OXv4ViRbDh8zkQLerQwOeMfNkPmpu1HmxlmSScJbs,10894
celery/utils/dispatch/signal.py,sha256=1K6bmvN7QdhSyfLwxflTmsxIQrpSirix5bxjjLTE4D0,8343
celery/worker/__init__.py,sha256=vFwzEd6pUJTu1joU9OL_FIPY6DG4KNcrXJyuJRuGnPw,13641
celery/worker/autoreload.py,sha256=svnUXyQqm2QlKBiUJWGJS51DcmedEtQgzKp7HYEuf0E,8868
celery/worker/autoscale.py,sha256=e6iN0hq6FlOvsA9MmIulWySZxiRQNVAc9_ILZtLWetc,4864
celery/worker/components.py,sha256=I3RmLjA7f0bQ8mFrSpLNH9s-j9Gg0sBptZx7wG9X3ro,7580
celery/worker/consumer.py,sha256=AGmtw7dHAPHYmx1DLy3R2GbLJa30KXHoaMrLKmwLrzk,29347
celery/worker/control.py,sha256=6IfSRbMSB7R9yXUGlR4sdkoJderRvKh-uty8tqclejM,11410
celery/worker/heartbeat.py,sha256=NAM8Bq7ywHabXclltgrnCQb6FbnBh3sLPEveycNP3sk,1737
celery/worker/job.py,sha256=bmkKSVd5BuHcGdQ_gn3MJeeLkx_-iBvKTRTImLrtBYI,21054
celery/worker/loops.py,sha256=uAQDdHg-hAo7RvgqVMhgvOkDqmAkJDVGj4FgQNzObAc,3420
celery/worker/pidbox.py,sha256=wfephMpB1gJu0f0WPUFrsTSPQjSGvwp3FCJNTcPtHzo,3447
celery/worker/request.py,sha256=twDXCdrvS7T0KAlknT_XubTINPeoXyuqVPNnSsEqQgM,18826
celery/worker/state.py,sha256=i2DbvX5v483Lyf_VUUKbzp7oMCHSvq5sMbb3A3G1zx4,6791
celery/worker/strategy.py,sha256=TlObf6FkTTEeGF0FTbkkmh5Axl-IXiNxHZG0ec6C_DQ,3087
celery-3.1.26.post2.dist-info/DESCRIPTION.rst,sha256=ZjBRr8syYhEecvIb6tx8S15p0Lgv7cWd3DpkJxw8gFs,11599
celery-3.1.26.post2.dist-info/METADATA,sha256=0QZInn5VoWtzmvqD8gOQYch2rELrfOeA4v5baTqLnT8,14355
celery-3.1.26.post2.dist-info/RECORD,,
celery-3.1.26.post2.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
celery-3.1.26.post2.dist-info/entry_points.txt,sha256=Cx6fgw30zDMkid9S17TYinhJwJHG5MjMfwZNGqDsTb4,178
celery-3.1.26.post2.dist-info/metadata.json,sha256=wI1gtk7Xfkv36kqvqr7aIy34p86b3R_XDTsh-eJd3IA,3169
celery-3.1.26.post2.dist-info/top_level.txt,sha256=sQQ-a5HNsZIi2A8DiKQnB1HODFMfmrzIAZIE8t_XiOA,7
../../../bin/celery,sha256=reolwO892Sx1ruHQnX6Gb7v-Su0tWTjipUH7c7xDZQc,246
../../../bin/celerybeat,sha256=goFpTFIXyk1hqyNFRA1KfbG61c9lJLp1wSo2pRe3mnU,262
../../../bin/celeryd,sha256=tl_DPKb1fRWEd_McTOvrwTdSgYw3U4PtFFRb9UnrFFs,266
../../../bin/celeryd-multi,sha256=Ktk0eE1NxFhtnA9MWP_AberKfyVK307SoM2SCVhQHto,264
celery-3.1.26.post2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
celery/app/__pycache__/amqp.cpython-36.pyc,,
celery/app/__pycache__/annotations.cpython-36.pyc,,
celery/app/__pycache__/task.cpython-36.pyc,,
celery/app/__pycache__/control.cpython-36.pyc,,
celery/app/__pycache__/trace.cpython-36.pyc,,
celery/app/__pycache__/builtins.cpython-36.pyc,,
celery/app/__pycache__/base.cpython-36.pyc,,
celery/app/__pycache__/log.cpython-36.pyc,,
celery/app/__pycache__/defaults.cpython-36.pyc,,
celery/app/__pycache__/registry.cpython-36.pyc,,
celery/app/__pycache__/utils.cpython-36.pyc,,
celery/app/__pycache__/routes.cpython-36.pyc,,
celery/app/__pycache__/__init__.cpython-36.pyc,,
celery/bin/__pycache__/amqp.cpython-36.pyc,,
celery/bin/__pycache__/beat.cpython-36.pyc,,
celery/bin/__pycache__/celeryd_detach.cpython-36.pyc,,
celery/bin/__pycache__/multi.cpython-36.pyc,,
celery/bin/__pycache__/base.cpython-36.pyc,,
celery/bin/__pycache__/celery.cpython-36.pyc,,
celery/bin/__pycache__/__init__.cpython-36.pyc,,
celery/bin/__pycache__/worker.cpython-36.pyc,,
celery/bin/__pycache__/graph.cpython-36.pyc,,
celery/bin/__pycache__/events.cpython-36.pyc,,
celery/security/__pycache__/key.cpython-36.pyc,,
celery/security/__pycache__/certificate.cpython-36.pyc,,
celery/security/__pycache__/utils.cpython-36.pyc,,
celery/security/__pycache__/serialization.cpython-36.pyc,,
celery/security/__pycache__/__init__.cpython-36.pyc,,
celery/backends/database/__pycache__/models.cpython-36.pyc,,
celery/backends/database/__pycache__/session.cpython-36.pyc,,
celery/backends/database/__pycache__/__init__.cpython-36.pyc,,
celery/backends/__pycache__/amqp.cpython-36.pyc,,
celery/backends/__pycache__/cassandra.cpython-36.pyc,,
celery/backends/__pycache__/rpc.cpython-36.pyc,,
celery/backends/__pycache__/base.cpython-36.pyc,,
celery/backends/__pycache__/couchbase.cpython-36.pyc,,
celery/backends/__pycache__/cache.cpython-36.pyc,,
celery/backends/__pycache__/mongodb.cpython-36.pyc,,
celery/backends/__pycache__/__init__.cpython-36.pyc,,
celery/backends/__pycache__/redis.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_chord.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_states.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_tasks.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_canvas.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_context.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_result.cpython-36.pyc,,
celery/tests/tasks/__pycache__/__init__.cpython-36.pyc,,
celery/tests/tasks/__pycache__/test_trace.cpython-36.pyc,,
celery/tests/app/__pycache__/test_defaults.cpython-36.pyc,,
celery/tests/app/__pycache__/test_registry.cpython-36.pyc,,
celery/tests/app/__pycache__/test_loaders.cpython-36.pyc,,
celery/tests/app/__pycache__/test_builtins.cpython-36.pyc,,
celery/tests/app/__pycache__/test_log.cpython-36.pyc,,
celery/tests/app/__pycache__/test_utils.cpython-36.pyc,,
celery/tests/app/__pycache__/test_control.cpython-36.pyc,,
celery/tests/app/__pycache__/test_celery.cpython-36.pyc,,
celery/tests/app/__pycache__/test_routes.cpython-36.pyc,,
celery/tests/app/__pycache__/test_annotations.cpython-36.pyc,,
celery/tests/app/__pycache__/test_exceptions.cpython-36.pyc,,
celery/tests/app/__pycache__/test_beat.cpython-36.pyc,,
celery/tests/app/__pycache__/test_amqp.cpython-36.pyc,,
celery/tests/app/__pycache__/test_app.cpython-36.pyc,,
celery/tests/app/__pycache__/test_schedules.cpython-36.pyc,,
celery/tests/app/__pycache__/__init__.cpython-36.pyc,,
celery/tests/bin/proj/__pycache__/app.cpython-36.pyc,,
celery/tests/bin/proj/__pycache__/__init__.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_worker.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_events.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_base.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_celery.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_celeryevdump.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_multi.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_beat.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_amqp.cpython-36.pyc,,
celery/tests/bin/__pycache__/test_celeryd_detach.cpython-36.pyc,,
celery/tests/bin/__pycache__/__init__.cpython-36.pyc,,
celery/tests/security/__pycache__/test_security.cpython-36.pyc,,
celery/tests/security/__pycache__/test_certificate.cpython-36.pyc,,
celery/tests/security/__pycache__/test_serialization.cpython-36.pyc,,
celery/tests/security/__pycache__/test_key.cpython-36.pyc,,
celery/tests/security/__pycache__/case.cpython-36.pyc,,
celery/tests/security/__pycache__/__init__.cpython-36.pyc,,
celery/tests/slow/__pycache__/__init__.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_cache.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_backends.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_base.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_redis.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_couchbase.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_mongodb.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_database.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_amqp.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_rpc.cpython-36.pyc,,
celery/tests/backends/__pycache__/__init__.cpython-36.pyc,,
celery/tests/backends/__pycache__/test_cassandra.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_decorators.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_sets.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_http.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_compat.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_messaging.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/test_compat_utils.cpython-36.pyc,,
celery/tests/compat_modules/__pycache__/__init__.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_pickle.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_local.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_utils.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_imports.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_sysinfo.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_platforms.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_serialization.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_saferef.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_timeutils.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_text.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_datastructures.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_encoding.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_timer2.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_term.cpython-36.pyc,,
celery/tests/utils/__pycache__/__init__.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_dispatcher.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_mail.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_functional.cpython-36.pyc,,
celery/tests/utils/__pycache__/test_threads.cpython-36.pyc,,
celery/tests/__pycache__/case.cpython-36.pyc,,
celery/tests/__pycache__/__init__.cpython-36.pyc,,
celery/tests/contrib/__pycache__/test_migrate.cpython-36.pyc,,
celery/tests/contrib/__pycache__/test_rdb.cpython-36.pyc,,
celery/tests/contrib/__pycache__/test_abortable.cpython-36.pyc,,
celery/tests/contrib/__pycache__/test_methods.cpython-36.pyc,,
celery/tests/contrib/__pycache__/__init__.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_solo.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_gevent.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_concurrency.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_eventlet.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_pool.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/__init__.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_prefork.cpython-36.pyc,,
celery/tests/concurrency/__pycache__/test_threads.cpython-36.pyc,,
celery/tests/fixups/__pycache__/test_django.cpython-36.pyc,,
celery/tests/fixups/__pycache__/__init__.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_consumer.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_request.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_autoreload.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_worker.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_control.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_components.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_revoke.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_hub.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_bootsteps.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_autoscale.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_state.cpython-36.pyc,,
celery/tests/worker/__pycache__/__init__.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_heartbeat.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_loops.cpython-36.pyc,,
celery/tests/worker/__pycache__/test_strategy.cpython-36.pyc,,
celery/tests/events/__pycache__/test_cursesmon.cpython-36.pyc,,
celery/tests/events/__pycache__/test_events.cpython-36.pyc,,
celery/tests/events/__pycache__/test_snapshot.cpython-36.pyc,,
celery/tests/events/__pycache__/test_state.cpython-36.pyc,,
celery/tests/events/__pycache__/__init__.cpython-36.pyc,,
celery/tests/functional/__pycache__/tasks.cpython-36.pyc,,
celery/tests/functional/__pycache__/case.cpython-36.pyc,,
celery/tests/functional/__pycache__/__init__.cpython-36.pyc,,
celery/utils/dispatch/__pycache__/saferef.cpython-36.pyc,,
celery/utils/dispatch/__pycache__/signal.cpython-36.pyc,,
celery/utils/dispatch/__pycache__/__init__.cpython-36.pyc,,
celery/utils/__pycache__/timer2.cpython-36.pyc,,
celery/utils/__pycache__/debug.cpython-36.pyc,,
celery/utils/__pycache__/sysinfo.cpython-36.pyc,,
celery/utils/__pycache__/term.cpython-36.pyc,,
celery/utils/__pycache__/imports.cpython-36.pyc,,
celery/utils/__pycache__/mail.cpython-36.pyc,,
celery/utils/__pycache__/functional.cpython-36.pyc,,
celery/utils/__pycache__/timeutils.cpython-36.pyc,,
celery/utils/__pycache__/objects.cpython-36.pyc,,
celery/utils/__pycache__/text.cpython-36.pyc,,
celery/utils/__pycache__/encoding.cpython-36.pyc,,
celery/utils/__pycache__/compat.cpython-36.pyc,,
celery/utils/__pycache__/log.cpython-36.pyc,,
celery/utils/__pycache__/threads.cpython-36.pyc,,
celery/utils/__pycache__/iso8601.cpython-36.pyc,,
celery/utils/__pycache__/serialization.cpython-36.pyc,,
celery/utils/__pycache__/__init__.cpython-36.pyc,,
celery/__pycache__/beat.cpython-36.pyc,,
celery/__pycache__/schedules.cpython-36.pyc,,
celery/__pycache__/exceptions.cpython-36.pyc,,
celery/__pycache__/datastructures.cpython-36.pyc,,
celery/__pycache__/result.cpython-36.pyc,,
celery/__pycache__/signals.cpython-36.pyc,,
celery/__pycache__/_state.cpython-36.pyc,,
celery/__pycache__/__main__.cpython-36.pyc,,
celery/__pycache__/canvas.cpython-36.pyc,,
celery/__pycache__/five.cpython-36.pyc,,
celery/__pycache__/local.cpython-36.pyc,,
celery/__pycache__/bootsteps.cpython-36.pyc,,
celery/__pycache__/platforms.cpython-36.pyc,,
celery/__pycache__/states.cpython-36.pyc,,
celery/__pycache__/__init__.cpython-36.pyc,,
celery/contrib/__pycache__/rdb.cpython-36.pyc,,
celery/contrib/__pycache__/migrate.cpython-36.pyc,,
celery/contrib/__pycache__/abortable.cpython-36.pyc,,
celery/contrib/__pycache__/batches.cpython-36.pyc,,
celery/contrib/__pycache__/methods.cpython-36.pyc,,
celery/contrib/__pycache__/sphinx.cpython-36.pyc,,
celery/contrib/__pycache__/__init__.cpython-36.pyc,,
celery/concurrency/__pycache__/asynpool.cpython-36.pyc,,
celery/concurrency/__pycache__/gevent.cpython-36.pyc,,
celery/concurrency/__pycache__/base.cpython-36.pyc,,
celery/concurrency/__pycache__/threads.cpython-36.pyc,,
celery/concurrency/__pycache__/prefork.cpython-36.pyc,,
celery/concurrency/__pycache__/eventlet.cpython-36.pyc,,
celery/concurrency/__pycache__/__init__.cpython-36.pyc,,
celery/concurrency/__pycache__/solo.cpython-36.pyc,,
celery/task/__pycache__/trace.cpython-36.pyc,,
celery/task/__pycache__/sets.cpython-36.pyc,,
celery/task/__pycache__/base.cpython-36.pyc,,
celery/task/__pycache__/http.cpython-36.pyc,,
celery/task/__pycache__/__init__.cpython-36.pyc,,
celery/fixups/__pycache__/django.cpython-36.pyc,,
celery/fixups/__pycache__/__init__.cpython-36.pyc,,
celery/worker/__pycache__/heartbeat.cpython-36.pyc,,
celery/worker/__pycache__/autoscale.cpython-36.pyc,,
celery/worker/__pycache__/strategy.cpython-36.pyc,,
celery/worker/__pycache__/request.cpython-36.pyc,,
celery/worker/__pycache__/job.cpython-36.pyc,,
celery/worker/__pycache__/state.cpython-36.pyc,,
celery/worker/__pycache__/control.cpython-36.pyc,,
celery/worker/__pycache__/pidbox.cpython-36.pyc,,
celery/worker/__pycache__/loops.cpython-36.pyc,,
celery/worker/__pycache__/components.cpython-36.pyc,,
celery/worker/__pycache__/consumer.cpython-36.pyc,,
celery/worker/__pycache__/autoreload.cpython-36.pyc,,
celery/worker/__pycache__/__init__.cpython-36.pyc,,
celery/events/__pycache__/state.cpython-36.pyc,,
celery/events/__pycache__/cursesmon.cpython-36.pyc,,
celery/events/__pycache__/__init__.cpython-36.pyc,,
celery/events/__pycache__/dumper.cpython-36.pyc,,
celery/events/__pycache__/snapshot.cpython-36.pyc,,
celery/apps/__pycache__/beat.cpython-36.pyc,,
celery/apps/__pycache__/__init__.cpython-36.pyc,,
celery/apps/__pycache__/worker.cpython-36.pyc,,
celery/loaders/__pycache__/app.cpython-36.pyc,,
celery/loaders/__pycache__/default.cpython-36.pyc,,
celery/loaders/__pycache__/base.cpython-36.pyc,,
celery/loaders/__pycache__/__init__.cpython-36.pyc,,

+ 6
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any


+ 6
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt

@@ -0,0 +1,6 @@
[console_scripts]
celery = celery.__main__:main
celerybeat = celery.__main__:_compat_beat
celeryd = celery.__main__:_compat_worker
celeryd-multi = celery.__main__:_compat_multi


+ 1
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json

@@ -0,0 +1 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Topic :: System :: Distributed Computing", "Topic :: Software Development :: Object Brokering", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Jython", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X"], "description_content_type": "UNKNOWN", "extensions": {"python.commands": {"wrap_console": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}, "python.details": {"contacts": [{"email": "ask@celeryproject.org", "name": "Ask Solem", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://celeryproject.org"}}, "python.exports": {"console_scripts": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}}, "extras": ["auth", "beanstalk", "cassandra", "couchbase", "couchdb", "eventlet", "gevent", "librabbitmq", "memcache", "mongodb", "msgpack", "pyro", "redis", "slmq", "sqlalchemy", "sqs", "threads", "yaml", "zeromq", "zookeeper"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "celery", "platform": "any", "run_requires": [{"extra": "yaml", "requires": ["PyYAML (>=3.10)"]}, {"extra": "beanstalk", "requires": ["beanstalkc"]}, {"requires": ["billiard (<3.4,>=3.3.0.23)", "kombu (<3.1,>=3.0.37)", "pytz (>dev)"]}, {"extra": "sqs", "requires": ["boto (>=2.13.3)"]}, {"extra": "couchbase", "requires": ["couchbase"]}, {"extra": "couchdb", "requires": ["couchdb"]}, {"extra": "eventlet", "requires": ["eventlet"]}, {"extra": "gevent", "requires": ["gevent"]}, {"extra": "zookeeper", "requires": ["kazoo (>=1.3.1)"]}, {"extra": "librabbitmq", "requires": ["librabbitmq (>=1.6.1)"]}, {"extra": "msgpack", "requires": ["msgpack-python (>=0.3.0)"]}, {"extra": "auth", "requires": ["pyOpenSSL"]}, {"extra": "cassandra", "requires": ["pycassa"]}, {"extra": "memcache", "requires": ["pylibmc"]}, {"extra": "mongodb", "requires": ["pymongo (>=2.6.2)"]}, {"extra": "pyro", "requires": ["pyro4"]}, {"extra": "zeromq", "requires": ["pyzmq (>=13.1.0)"]}, {"extra": "redis", "requires": ["redis (>=2.8.0)"]}, {"extra": "slmq", "requires": ["softlayer-messaging (>=1.0.3)"]}, {"extra": "sqlalchemy", "requires": ["sqlalchemy"]}, {"extra": "threads", "requires": ["threadpool"]}], "summary": "Distributed Task Queue", "test_requires": [{"requires": ["mock (>=1.0.1)", "nose", "unittest2 (>=0.5.1)"]}], "version": "3.1.26.post2"}

+ 1
- 0
thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt

@@ -0,0 +1 @@
celery

+ 155
- 0
thesisenv/lib/python3.6/site-packages/celery/__init__.py

@@ -0,0 +1,155 @@
# -*- coding: utf-8 -*-
"""Distributed Task Queue"""
# :copyright: (c) 2015 Ask Solem and individual contributors.
#                 All rights # reserved.
# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved.
# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
#                 All rights reserved.
# :license:   BSD (3 Clause), see LICENSE for more details.

from __future__ import absolute_import

import os
import sys

from collections import namedtuple

version_info_t = namedtuple(
    'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
)

SERIES = 'Cipater'
VERSION = version_info_t(3, 1, 26, '.post2', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://celeryproject.org'
__docformat__ = 'restructuredtext'
__all__ = [
    'Celery', 'bugreport', 'shared_task', 'task',
    'current_app', 'current_task', 'maybe_signature',
    'chain', 'chord', 'chunks', 'group', 'signature',
    'xmap', 'xstarmap', 'uuid', 'version', '__version__',
]
VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)

# -eof meta-

if os.environ.get('C_IMPDEBUG'):  # pragma: no cover
    from .five import builtins
    real_import = builtins.__import__

    def debug_import(name, locals=None, globals=None,
                     fromlist=None, level=-1):
        glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals
        importer_name = glob and glob.get('__name__') or 'unknown'
        print('-- {0} imports {1}'.format(importer_name, name))
        return real_import(name, locals, globals, fromlist, level)
    builtins.__import__ = debug_import

# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:  # pragma: no cover
    from celery.app import shared_task                   # noqa
    from celery.app.base import Celery                   # noqa
    from celery.app.utils import bugreport               # noqa
    from celery.app.task import Task                     # noqa
    from celery._state import current_app, current_task  # noqa
    from celery.canvas import (                          # noqa
        chain, chord, chunks, group,
        signature, maybe_signature, xmap, xstarmap, subtask,
    )
    from celery.utils import uuid                        # noqa

# Eventlet/gevent patching must happen before importing
# anything else, so these tools must be at top-level.


def _find_option_with_arg(argv, short_opts=None, long_opts=None):
    """Search argv for option specifying its short and longopt
    alternatives.

    Return the value of the option if found.

    """
    for i, arg in enumerate(argv):
        if arg.startswith('-'):
            if long_opts and arg.startswith('--'):
                name, _, val = arg.partition('=')
                if name in long_opts:
                    return val
            if short_opts and arg in short_opts:
                return argv[i + 1]
    raise KeyError('|'.join(short_opts or [] + long_opts or []))


def _patch_eventlet():
    import eventlet
    import eventlet.debug
    eventlet.monkey_patch()
    EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0))
    if EVENTLET_DBLOCK:
        eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK)


def _patch_gevent():
    from gevent import monkey, version_info
    monkey.patch_all()
    if version_info[0] == 0:  # pragma: no cover
        # Signals aren't working in gevent versions <1.0,
        # and are not monkey patched by patch_all()
        from gevent import signal as _gevent_signal
        _signal = __import__('signal')
        _signal.signal = _gevent_signal


def maybe_patch_concurrency(argv=sys.argv,
                            short_opts=['-P'], long_opts=['--pool'],
                            patches={'eventlet': _patch_eventlet,
                                     'gevent': _patch_gevent}):
    """With short and long opt alternatives that specify the command line
    option to set the pool, this makes sure that anything that needs
    to be patched is completed as early as possible.
    (e.g. eventlet/gevent monkey patches)."""
    try:
        pool = _find_option_with_arg(argv, short_opts, long_opts)
    except KeyError:
        pass
    else:
        try:
            patcher = patches[pool]
        except KeyError:
            pass
        else:
            patcher()
        # set up eventlet/gevent environments ASAP.
        from celery import concurrency
        concurrency.get_implementation(pool)

# Lazy loading
from celery import five  # noqa

old_module, new_module = five.recreate_module(  # pragma: no cover
    __name__,
    by_module={
        'celery.app': ['Celery', 'bugreport', 'shared_task'],
        'celery.app.task': ['Task'],
        'celery._state': ['current_app', 'current_task'],
        'celery.canvas': ['chain', 'chord', 'chunks', 'group',
                          'signature', 'maybe_signature', 'subtask',
                          'xmap', 'xstarmap'],
        'celery.utils': ['uuid'],
    },
    direct={'task': 'celery.task'},
    __package__='celery', __file__=__file__,
    __path__=__path__, __doc__=__doc__, __version__=__version__,
    __author__=__author__, __contact__=__contact__,
    __homepage__=__homepage__, __docformat__=__docformat__, five=five,
    VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
    version_info_t=version_info_t,
    maybe_patch_concurrency=maybe_patch_concurrency,
    _find_option_with_arg=_find_option_with_arg,
)
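
A short aside (not part of the diff) on how ``maybe_patch_concurrency``
behaves: when no pool option appears in ``argv``, the ``KeyError`` raised
inside ``_find_option_with_arg`` is swallowed and nothing is patched, so the
sketch below runs without eventlet or gevent installed::

    from celery import maybe_patch_concurrency

    # No -P/--pool option present: the lookup raises KeyError internally
    # and the call is a harmless no-op.
    maybe_patch_concurrency(argv=['worker', '--loglevel=INFO'])

    # maybe_patch_concurrency(argv=['-P', 'eventlet']) would instead run
    # _patch_eventlet() before the rest of celery is imported (and would
    # require eventlet to be installed).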

+ 54
- 0
thesisenv/lib/python3.6/site-packages/celery/__main__.py

@@ -0,0 +1,54 @@
from __future__ import absolute_import

import sys

from os.path import basename

from . import maybe_patch_concurrency

__all__ = ['main']

DEPRECATED_FMT = """
The {old!r} command is deprecated, please use {new!r} instead:

$ {new_argv}

"""


def _warn_deprecated(new):
    print(DEPRECATED_FMT.format(
        old=basename(sys.argv[0]), new=new,
        new_argv=' '.join([new] + sys.argv[1:])),
    )


def main():
    if 'multi' not in sys.argv:
        maybe_patch_concurrency()
    from celery.bin.celery import main
    main()


def _compat_worker():
    maybe_patch_concurrency()
    _warn_deprecated('celery worker')
    from celery.bin.worker import main
    main()


def _compat_multi():
    _warn_deprecated('celery multi')
    from celery.bin.multi import main
    main()


def _compat_beat():
    maybe_patch_concurrency()
    _warn_deprecated('celery beat')
    from celery.bin.beat import main
    main()


if __name__ == '__main__':  # pragma: no cover
    main()

+ 159
- 0
thesisenv/lib/python3.6/site-packages/celery/_state.py

@@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
"""
    celery._state
    ~~~~~~~~~~~~~~~

    This is an internal module containing thread state
    like the ``current_app``, and ``current_task``.

    This module shouldn't be used directly.

"""
from __future__ import absolute_import, print_function

import os
import sys
import threading
import weakref

from celery.local import Proxy
from celery.utils.threads import LocalStack

try:
    from weakref import WeakSet as AppSet
except ImportError:  # XXX Py2.6

    class AppSet(object):  # noqa

        def __init__(self):
            self._refs = set()

        def add(self, app):
            self._refs.add(weakref.ref(app))

        def __iter__(self):
            dirty = []
            try:
                for appref in self._refs:
                    app = appref()
                    if app is None:
                        dirty.append(appref)
                    else:
                        yield app
            finally:
                while dirty:
                    self._refs.discard(dirty.pop())

__all__ = ['set_default_app', 'get_current_app', 'get_current_task',
           'get_current_worker_task', 'current_app', 'current_task',
           'connect_on_app_finalize']

#: Global default app used when no current app.
default_app = None

#: List of all app instances (weakrefs), must not be used directly.
_apps = AppSet()

#: global set of functions to call whenever a new app is finalized
#: E.g. Shared tasks, and builtin tasks are created
#: by adding callbacks here.
_on_app_finalizers = set()

_task_join_will_block = False


def connect_on_app_finalize(callback):
    _on_app_finalizers.add(callback)
    return callback


def _announce_app_finalized(app):
    callbacks = set(_on_app_finalizers)
    for callback in callbacks:
        callback(app)


def _set_task_join_will_block(blocks):
    global _task_join_will_block
    _task_join_will_block = blocks


def task_join_will_block():
    return _task_join_will_block


class _TLS(threading.local):
    #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
    #: sets this, so it will always contain the last instantiated app,
    #: and is the default app returned by :func:`app_or_default`.
    current_app = None
_tls = _TLS()

_task_stack = LocalStack()


def set_default_app(app):
    global default_app
    default_app = app


def _get_current_app():
    if default_app is None:
        #: creates the global fallback app instance.
        from celery.app import Celery
        set_default_app(Celery(
            'default',
            loader=os.environ.get('CELERY_LOADER') or 'default',
            fixups=[],
            set_as_current=False, accept_magic_kwargs=True,
        ))
    return _tls.current_app or default_app


def _set_current_app(app):
    _tls.current_app = app


C_STRICT_APP = os.environ.get('C_STRICT_APP')
if os.environ.get('C_STRICT_APP'):  # pragma: no cover
    def get_current_app():
        raise Exception('USES CURRENT APP')
        import traceback
        print('-- USES CURRENT_APP', file=sys.stderr)  # noqa+
        traceback.print_stack(file=sys.stderr)
        return _get_current_app()
else:
    get_current_app = _get_current_app


def get_current_task():
    """Currently executing task."""
    return _task_stack.top


def get_current_worker_task():
    """Currently executing task, that was applied by the worker.

    This is used to differentiate between the actual task
    executed by the worker and any task that was called within
    a task (using ``task.__call__`` or ``task.apply``)

    """
    for task in reversed(_task_stack.stack):
        if not task.request.called_directly:
            return task


#: Proxy to current app.
current_app = Proxy(get_current_app)

#: Proxy to current task.
current_task = Proxy(get_current_task)


def _register_app(app):
    _apps.add(app)


def _get_active_apps():
    return _apps
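
As an aside (not part of the diff), the exported proxies resolve lazily to
whatever app is current, which a two-line check makes concrete; it assumes
only that celery is importable::

    from celery import Celery, current_app

    app = Celery('demo')               # registers itself as the current app
    assert current_app.main == 'demo'  # the proxy now resolves to ``app``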

+ 150
- 0
thesisenv/lib/python3.6/site-packages/celery/app/__init__.py

@@ -0,0 +1,150 @@
# -*- coding: utf-8 -*-
"""
    celery.app
    ~~~~~~~~~~

    Celery Application.

"""
from __future__ import absolute_import

import os

from celery.local import Proxy
from celery import _state
from celery._state import (
    get_current_app as current_app,
    get_current_task as current_task,
    connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
)
from celery.utils import gen_task_name

from .base import Celery, AppPickler

__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
           'bugreport', 'enable_trace', 'disable_trace', 'shared_task',
           'set_default_app', 'current_app', 'current_task',
           'push_current_task', 'pop_current_task']

#: Proxy always returning the app set as default.
default_app = Proxy(lambda: _state.default_app)

#: Function returning the app provided or the default app if none.
#:
#: The environment variable :envvar:`CELERY_TRACE_APP` is used to
#: trace app leaks. When enabled an exception is raised if there
#: is no active app.
app_or_default = None

#: The 'default' loader is the default loader used by old applications.
#: This is deprecated and should no longer be used as it's set too early
#: to be affected by --loader argument.
default_loader = os.environ.get('CELERY_LOADER') or 'default'  # XXX


#: Function used to push a task to the thread local stack
#: keeping track of the currently executing task.
#: You must remember to pop the task after.
push_current_task = _task_stack.push

#: Function used to pop a task from the thread local stack
#: keeping track of the currently executing task.
pop_current_task = _task_stack.pop


def bugreport(app=None):
    return (app or current_app()).bugreport()


def _app_or_default(app=None):
    if app is None:
        return _state.get_current_app()
    return app


def _app_or_default_trace(app=None):  # pragma: no cover
    from traceback import print_stack
    from billiard import current_process
    if app is None:
        if getattr(_state._tls, 'current_app', None):
            print('-- RETURNING TO CURRENT APP --')  # noqa+
            print_stack()
            return _state._tls.current_app
        if current_process()._name == 'MainProcess':
            raise Exception('DEFAULT APP')
        print('-- RETURNING TO DEFAULT APP --')  # noqa+
        print_stack()
        return _state.default_app
    return app


def enable_trace():
    global app_or_default
    app_or_default = _app_or_default_trace


def disable_trace():
    global app_or_default
    app_or_default = _app_or_default

if os.environ.get('CELERY_TRACE_APP'):  # pragma: no cover
    enable_trace()
else:
    disable_trace()

App = Celery  # XXX Compat


def shared_task(*args, **kwargs):
    """Create shared tasks (decorator).
    Will return a proxy that always takes the task from the current apps
    task registry.

    This can be used by library authors to create tasks that will work
    for any app environment.

    Example:

        >>> from celery import Celery, shared_task
        >>> @shared_task
        ... def add(x, y):
        ...     return x + y

        >>> app1 = Celery(broker='amqp://')
        >>> add.app is app1
        True

        >>> app2 = Celery(broker='redis://')
        >>> add.app is app2
        True

    """

    def create_shared_task(**options):

        def __inner(fun):
            name = options.get('name')
            # Set as shared task so that unfinalized apps,
            # and future apps will load the task.
            connect_on_app_finalize(
                lambda app: app._task_from_fun(fun, **options)
            )

            # Force all finalized apps to take this task as well.
            for app in _get_active_apps():
                if app.finalized:
                    with app._finalize_mutex:
                        app._task_from_fun(fun, **options)

            # Return a proxy that always gets the task from the current
            # apps task registry.
            def task_by_cons():
                app = current_app()
                return app.tasks[
                    name or gen_task_name(app, fun.__name__, fun.__module__)
                ]
            return Proxy(task_by_cons)
        return __inner

    if len(args) == 1 and callable(args[0]):
        return create_shared_task(**kwargs)(args[0])
    return create_shared_task(*args, **kwargs)
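
A hypothetical illustration (not part of the diff) of the push/pop contract
noted in the comments above; every ``push_current_task`` must be paired with
a ``pop_current_task``::

    from celery import Celery
    from celery.app import push_current_task, pop_current_task
    from celery._state import get_current_task

    app = Celery('demo')

    @app.task
    def noop():
        pass

    push_current_task(noop)
    assert get_current_task() is noop  # the stack top is now our task
    pop_current_task()                 # always pop what you pushed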

+ 512
- 0
thesisenv/lib/python3.6/site-packages/celery/app/amqp.py

@@ -0,0 +1,512 @@
# -*- coding: utf-8 -*-
"""
    celery.app.amqp
    ~~~~~~~~~~~~~~~

    Sending and receiving messages using Kombu.

"""
from __future__ import absolute_import

import numbers

from datetime import timedelta
from weakref import WeakValueDictionary

from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.common import Broadcast
from kombu.pools import ProducerPool
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import safe_repr
from kombu.utils.functional import maybe_list

from celery import signals
from celery.five import items, string_t
from celery.utils.text import indent as textindent
from celery.utils.timeutils import to_utc

from . import app_or_default
from . import routes as _routes

__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer']

#: earliest date supported by time.mktime.
INT_MIN = -2147483648

#: Human readable queue declaration.
QUEUE_FORMAT = """
.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
key={0.routing_key}
"""


class Queues(dict):
    """Queue name⇒ declaration mapping.

    :param queues: Initial list/tuple or dict of queues.
    :keyword create_missing: By default any unknown queues will be
                             added automatically, but if disabled
                             the occurrence of unknown queues
                             in `wanted` will raise :exc:`KeyError`.
    :keyword ha_policy: Default HA policy for queues with none set.


    """
    #: If set, this is a subset of queues to consume from.
    #: The rest of the queues are then used for routing only.
    _consume_from = None

    def __init__(self, queues=None, default_exchange=None,
                 create_missing=True, ha_policy=None, autoexchange=None):
        dict.__init__(self)
        self.aliases = WeakValueDictionary()
        self.default_exchange = default_exchange
        self.create_missing = create_missing
        self.ha_policy = ha_policy
        self.autoexchange = Exchange if autoexchange is None else autoexchange
        if isinstance(queues, (tuple, list)):
            queues = dict((q.name, q) for q in queues)
        for name, q in items(queues or {}):
            self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)

    def __getitem__(self, name):
        try:
            return self.aliases[name]
        except KeyError:
            return dict.__getitem__(self, name)

    def __setitem__(self, name, queue):
        if self.default_exchange and (not queue.exchange or
                                      not queue.exchange.name):
            queue.exchange = self.default_exchange
        dict.__setitem__(self, name, queue)
        if queue.alias:
            self.aliases[queue.alias] = queue

    def __missing__(self, name):
        if self.create_missing:
            return self.add(self.new_missing(name))
        raise KeyError(name)

    def add(self, queue, **kwargs):
        """Add new queue.

        The first argument can either be a :class:`kombu.Queue` instance,
        or the name of a queue. If the former the rest of the keyword
        arguments are ignored, and options are simply taken from the queue
        instance.

        :param queue: :class:`kombu.Queue` instance or name of the queue.
        :keyword exchange: (if named) specifies exchange name.
        :keyword routing_key: (if named) specifies binding key.
        :keyword exchange_type: (if named) specifies type of exchange.
        :keyword \*\*options: (if named) Additional declaration options.

        """
        if not isinstance(queue, Queue):
            return self.add_compat(queue, **kwargs)
        if self.ha_policy:
            if queue.queue_arguments is None:
                queue.queue_arguments = {}
            self._set_ha_policy(queue.queue_arguments)
        self[queue.name] = queue
        return queue

    def add_compat(self, name, **options):
        # docs used to use binding_key as routing key
        options.setdefault('routing_key', options.get('binding_key'))
        if options['routing_key'] is None:
            options['routing_key'] = name
        if self.ha_policy is not None:
            self._set_ha_policy(options.setdefault('queue_arguments', {}))
        q = self[name] = Queue.from_dict(name, **options)
        return q

    def _set_ha_policy(self, args):
        policy = self.ha_policy
        if isinstance(policy, (list, tuple)):
            return args.update({'x-ha-policy': 'nodes',
                                'x-ha-policy-params': list(policy)})
        args['x-ha-policy'] = policy

    def format(self, indent=0, indent_first=True):
        """Format routing table into string for log dumps."""
        active = self.consume_from
        if not active:
            return ''
        info = [QUEUE_FORMAT.strip().format(q)
                for _, q in sorted(items(active))]
        if indent_first:
            return textindent('\n'.join(info), indent)
        return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)

    def select_add(self, queue, **kwargs):
        """Add new task queue that will be consumed from even when
        a subset has been selected using the :option:`-Q` option."""
        q = self.add(queue, **kwargs)
        if self._consume_from is not None:
            self._consume_from[q.name] = q
        return q

    def select(self, include):
        """Sets :attr:`consume_from` by selecting a subset of the
        currently defined queues.

        :param include: Names of queues to consume from.
                        Can be iterable or string.
        """
        if include:
            self._consume_from = dict((name, self[name])
                                      for name in maybe_list(include))
select_subset = select # XXX compat

def deselect(self, exclude):
"""Deselect queues so that they will not be consumed from.

:param exclude: Names of queues to avoid consuming from.
Can be iterable or string.

"""
if exclude:
exclude = maybe_list(exclude)
if self._consume_from is None:
# using selection
return self.select(k for k in self if k not in exclude)
# using all queues
for queue in exclude:
self._consume_from.pop(queue, None)
select_remove = deselect # XXX compat

def new_missing(self, name):
return Queue(name, self.autoexchange(name), name)

@property
def consume_from(self):
if self._consume_from is not None:
return self._consume_from
return self


class TaskProducer(Producer):
app = None
auto_declare = False
retry = False
retry_policy = None
utc = True
event_dispatcher = None
send_sent_event = False

def __init__(self, channel=None, exchange=None, *args, **kwargs):
self.retry = kwargs.pop('retry', self.retry)
self.retry_policy = kwargs.pop('retry_policy',
self.retry_policy or {})
self.send_sent_event = kwargs.pop('send_sent_event',
self.send_sent_event)
exchange = exchange or self.exchange
self.queues = self.app.amqp.queues # shortcut
self.default_queue = self.app.amqp.default_queue
self._default_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE
super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)

def publish_task(self, task_name, task_args=None, task_kwargs=None,
countdown=None, eta=None, task_id=None, group_id=None,
taskset_id=None, # compat alias to group_id
expires=None, exchange=None, exchange_type=None,
event_dispatcher=None, retry=None, retry_policy=None,
queue=None, now=None, retries=0, chord=None,
callbacks=None, errbacks=None, routing_key=None,
serializer=None, delivery_mode=None, compression=None,
reply_to=None, time_limit=None, soft_time_limit=None,
declare=None, headers=None,
send_before_publish=signals.before_task_publish.send,
before_receivers=signals.before_task_publish.receivers,
send_after_publish=signals.after_task_publish.send,
after_receivers=signals.after_task_publish.receivers,
send_task_sent=signals.task_sent.send, # XXX deprecated
sent_receivers=signals.task_sent.receivers,
**kwargs):
"""Send task message."""
retry = self.retry if retry is None else retry
headers = {} if headers is None else headers

qname = queue
if queue is None and exchange is None:
queue = self.default_queue
if queue is not None:
if isinstance(queue, string_t):
qname, queue = queue, self.queues[queue]
else:
qname = queue.name
exchange = exchange or queue.exchange.name
routing_key = routing_key or queue.routing_key
if declare is None and queue and not isinstance(queue, Broadcast):
declare = [queue]
if delivery_mode is None:
delivery_mode = self._default_mode

# merge default and custom policy
retry = self.retry if retry is None else retry
_rp = (dict(self.retry_policy, **retry_policy) if retry_policy
else self.retry_policy)
task_id = task_id or uuid()
task_args = task_args or []
task_kwargs = task_kwargs or {}
if not isinstance(task_args, (list, tuple)):
raise ValueError('task args must be a list or tuple')
if not isinstance(task_kwargs, dict):
raise ValueError('task kwargs must be a dictionary')
if countdown: # Convert countdown to ETA.
self._verify_seconds(countdown, 'countdown')
now = now or self.app.now()
eta = now + timedelta(seconds=countdown)
if self.utc:
eta = to_utc(eta).astimezone(self.app.timezone)
if isinstance(expires, numbers.Real):
self._verify_seconds(expires, 'expires')
now = now or self.app.now()
expires = now + timedelta(seconds=expires)
if self.utc:
expires = to_utc(expires).astimezone(self.app.timezone)
eta = eta and eta.isoformat()
expires = expires and expires.isoformat()

body = {
'task': task_name,
'id': task_id,
'args': task_args,
'kwargs': task_kwargs,
'retries': retries or 0,
'eta': eta,
'expires': expires,
'utc': self.utc,
'callbacks': callbacks,
'errbacks': errbacks,
'timelimit': (time_limit, soft_time_limit),
'taskset': group_id or taskset_id,
'chord': chord,
}

if before_receivers:
send_before_publish(
sender=task_name, body=body,
exchange=exchange,
routing_key=routing_key,
declare=declare,
headers=headers,
properties=kwargs,
retry_policy=retry_policy,
)

self.publish(
body,
exchange=exchange, routing_key=routing_key,
serializer=serializer or self.serializer,
compression=compression or self.compression,
headers=headers,
retry=retry, retry_policy=_rp,
reply_to=reply_to,
correlation_id=task_id,
delivery_mode=delivery_mode, declare=declare,
**kwargs
)

if after_receivers:
send_after_publish(sender=task_name, body=body,
exchange=exchange, routing_key=routing_key)

if sent_receivers: # XXX deprecated
send_task_sent(sender=task_name, task_id=task_id,
task=task_name, args=task_args,
kwargs=task_kwargs, eta=eta,
taskset=group_id or taskset_id)
if self.send_sent_event:
evd = event_dispatcher or self.event_dispatcher
exname = exchange or self.exchange
if isinstance(exname, Exchange):
exname = exname.name
evd.publish(
'task-sent',
{
'uuid': task_id,
'name': task_name,
'args': safe_repr(task_args),
'kwargs': safe_repr(task_kwargs),
'retries': retries,
'eta': eta,
'expires': expires,
'queue': qname,
'exchange': exname,
'routing_key': routing_key,
},
self, retry=retry, retry_policy=retry_policy,
)
return task_id
delay_task = publish_task # XXX Compat

def _verify_seconds(self, s, what):
if s < INT_MIN:
raise ValueError('%s is out of range: %r' % (what, s))
return s

@cached_property
def event_dispatcher(self):
# We call Dispatcher.publish with a custom producer
# so don't need the dispatcher to be "enabled".
return self.app.events.Dispatcher(enabled=False)


class TaskPublisher(TaskProducer):
"""Deprecated version of :class:`TaskProducer`."""

def __init__(self, channel=None, exchange=None, *args, **kwargs):
self.app = app_or_default(kwargs.pop('app', self.app))
self.retry = kwargs.pop('retry', self.retry)
self.retry_policy = kwargs.pop('retry_policy',
self.retry_policy or {})
exchange = exchange or self.exchange
if not isinstance(exchange, Exchange):
exchange = Exchange(exchange,
kwargs.pop('exchange_type', 'direct'))
self.queues = self.app.amqp.queues # shortcut
super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)


class TaskConsumer(Consumer):
app = None

def __init__(self, channel, queues=None, app=None, accept=None, **kw):
self.app = app or self.app
if accept is None:
accept = self.app.conf.CELERY_ACCEPT_CONTENT
super(TaskConsumer, self).__init__(
channel,
queues or list(self.app.amqp.queues.consume_from.values()),
accept=accept,
**kw
)


class AMQP(object):
Connection = Connection
Consumer = Consumer

#: compat alias to Connection
BrokerConnection = Connection

producer_cls = TaskProducer
consumer_cls = TaskConsumer
queues_cls = Queues

#: Cached and prepared routing table.
_rtable = None

#: Underlying producer pool instance automatically
#: set by the :attr:`producer_pool`.
_producer_pool = None

# Exchange class/function used when defining automatic queues.
# E.g. you can use ``autoexchange = lambda n: None`` to use the
# amqp default exchange, which is a shortcut to bypass routing
# and instead send directly to the queue named in the routing key.
autoexchange = None

def __init__(self, app):
self.app = app

def flush_routes(self):
self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)

def Queues(self, queues, create_missing=None, ha_policy=None,
autoexchange=None):
"""Create new :class:`Queues` instance, using queue defaults
from the current configuration."""
conf = self.app.conf
if create_missing is None:
create_missing = conf.CELERY_CREATE_MISSING_QUEUES
if ha_policy is None:
ha_policy = conf.CELERY_QUEUE_HA_POLICY
if not queues and conf.CELERY_DEFAULT_QUEUE:
queues = (Queue(conf.CELERY_DEFAULT_QUEUE,
exchange=self.default_exchange,
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), )
autoexchange = (self.autoexchange if autoexchange is None
else autoexchange)
return self.queues_cls(
queues, self.default_exchange, create_missing,
ha_policy, autoexchange,
)

def Router(self, queues=None, create_missing=None):
"""Return the current task router."""
return _routes.Router(self.routes, queues or self.queues,
self.app.either('CELERY_CREATE_MISSING_QUEUES',
create_missing), app=self.app)

@cached_property
def TaskConsumer(self):
"""Return consumer configured to consume from the queues
we are configured for (``app.amqp.queues.consume_from``)."""
return self.app.subclass_with_self(self.consumer_cls,
reverse='amqp.TaskConsumer')
get_task_consumer = TaskConsumer # XXX compat

@cached_property
def TaskProducer(self):
"""Return publisher used to send tasks.

You should use `app.send_task` instead.

"""
conf = self.app.conf
return self.app.subclass_with_self(
self.producer_cls,
reverse='amqp.TaskProducer',
exchange=self.default_exchange,
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
serializer=conf.CELERY_TASK_SERIALIZER,
compression=conf.CELERY_MESSAGE_COMPRESSION,
retry=conf.CELERY_TASK_PUBLISH_RETRY,
retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
utc=conf.CELERY_ENABLE_UTC,
)
TaskPublisher = TaskProducer # compat

@cached_property
def default_queue(self):
return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]

@cached_property
def queues(self):
"""Queue name⇒ declaration mapping."""
return self.Queues(self.app.conf.CELERY_QUEUES)

@queues.setter # noqa
def queues(self, queues):
return self.Queues(queues)

@property
def routes(self):
if self._rtable is None:
self.flush_routes()
return self._rtable

@cached_property
def router(self):
return self.Router()

@property
def producer_pool(self):
if self._producer_pool is None:
self._producer_pool = ProducerPool(
self.app.pool,
limit=self.app.pool.limit,
Producer=self.TaskProducer,
)
return self._producer_pool
publisher_pool = producer_pool # compat alias

@cached_property
def default_exchange(self):
return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)

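A short configuration sketch for the Queues machinery above (queue names
and the broker URL are assumptions, not part of this commit): entries in
CELERY_QUEUES become the Queues mapping at app.amqp.queues, and select()
is what the worker's -Q option uses to restrict consume_from to a subset.

from celery import Celery
from kombu import Exchange, Queue

app = Celery('example', broker='amqp://guest@localhost//')
app.conf.CELERY_DEFAULT_QUEUE = 'default'
app.conf.CELERY_QUEUES = (
    Queue('default', Exchange('default'), routing_key='default'),
    Queue('emails', Exchange('emails', type='topic'),
          routing_key='email.#'),
)

# Consume only from 'emails'; 'default' remains for routing only.
app.amqp.queues.select(['emails'])
print(list(app.amqp.queues.consume_from))  # ['emails']
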
+ 58 - 0 thesisenv/lib/python3.6/site-packages/celery/app/annotations.py

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
"""
celery.app.annotations
~~~~~~~~~~~~~~~~~~~~~~

Annotations is a nice term for monkey patching
task classes in the configuration.

This prepares and performs the annotations in the
:setting:`CELERY_ANNOTATIONS` setting.

"""
from __future__ import absolute_import

from celery.five import string_t
from celery.utils.functional import firstmethod, mlazy
from celery.utils.imports import instantiate

_first_match = firstmethod('annotate')
_first_match_any = firstmethod('annotate_any')

__all__ = ['MapAnnotation', 'prepare', 'resolve_all']


class MapAnnotation(dict):

def annotate_any(self):
try:
return dict(self['*'])
except KeyError:
pass

def annotate(self, task):
try:
return dict(self[task.name])
except KeyError:
pass


def prepare(annotations):
"""Expands the :setting:`CELERY_ANNOTATIONS` setting."""

def expand_annotation(annotation):
if isinstance(annotation, dict):
return MapAnnotation(annotation)
elif isinstance(annotation, string_t):
return mlazy(instantiate, annotation)
return annotation

if annotations is None:
return ()
elif not isinstance(annotations, (list, tuple)):
annotations = (annotations, )
return [expand_annotation(anno) for anno in annotations]


def resolve_all(anno, task):
return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x)

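A hedged example of the setting this module expands (the task name is
hypothetical): each annotation object patches attributes onto matching
task classes, with '*' applying to every task via annotate_any().

CELERY_ANNOTATIONS = (
    # Patch a single task by name.
    {'application.tasks.send_newsletter': {'rate_limit': '10/m'}},
    # Patch every task.
    {'*': {'acks_late': True}},
)
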
+ 675 - 0 thesisenv/lib/python3.6/site-packages/celery/app/base.py

@@ -0,0 +1,675 @@
# -*- coding: utf-8 -*-
"""
celery.app.base
~~~~~~~~~~~~~~~

Actual App instance implementation.

"""
from __future__ import absolute_import

import os
import threading
import warnings

from collections import defaultdict, deque
from copy import deepcopy
from operator import attrgetter

from amqp import promise
from billiard.util import register_after_fork
from kombu.clocks import LamportClock
from kombu.common import oid_from
from kombu.utils import cached_property, uuid

from celery import platforms
from celery import signals
from celery._state import (
_task_stack, get_current_app, _set_current_app, set_default_app,
_register_app, get_current_worker_task, connect_on_app_finalize,
_announce_app_finalized,
)
from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
from celery.five import values
from celery.loaders import get_loader_cls
from celery.local import PromiseProxy, maybe_evaluate
from celery.utils.functional import first, maybe_list
from celery.utils.imports import instantiate, symbol_by_name
from celery.utils.objects import FallbackContext, mro_lookup

from .annotations import prepare as prepare_annotations
from .defaults import DEFAULTS, find_deprecated_settings
from .registry import TaskRegistry
from .utils import (
AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr,
)

# Load all builtin tasks
from . import builtins # noqa

__all__ = ['Celery']

_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
BUILTIN_FIXUPS = frozenset([
'celery.fixups.django:fixup',
])

ERR_ENVVAR_NOT_SET = """\
The environment variable {0!r} is not set,
and as such the configuration could not be loaded.
Please set this variable and make it point to
a configuration module."""

_after_fork_registered = False


def app_has_custom(app, attr):
return mro_lookup(app.__class__, attr, stop=(Celery, object),
monkey_patched=[__name__])


def _unpickle_appattr(reverse_name, args):
"""Given an attribute name and a list of args, gets
the attribute from the current app and calls it."""
return get_current_app()._rgetattr(reverse_name)(*args)


def _global_after_fork(obj):
# Previously every app would call:
# `register_after_fork(app, app._after_fork)`
# but this created a leak as `register_after_fork` stores concrete object
# references and once registered an object cannot be removed without
# touching and iterating over the private afterfork registry list.
#
# See Issue #1949
from celery import _state
from multiprocessing import util as mputil
for app in _state._apps:
try:
app._after_fork(obj)
except Exception as exc:
if mputil._logger:
mputil._logger.info(
'after fork handler raised exception: %r', exc, exc_info=1)


def _ensure_after_fork():
global _after_fork_registered
_after_fork_registered = True
register_after_fork(_global_after_fork, _global_after_fork)


class Celery(object):
#: This is deprecated, use :meth:`reduce_keys` instead
Pickler = AppPickler

SYSTEM = platforms.SYSTEM
IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS

amqp_cls = 'celery.app.amqp:AMQP'
backend_cls = None
events_cls = 'celery.events:Events'
loader_cls = 'celery.loaders.app:AppLoader'
log_cls = 'celery.app.log:Logging'
control_cls = 'celery.app.control:Control'
task_cls = 'celery.app.task:Task'
registry_cls = TaskRegistry
_fixups = None
_pool = None
builtin_fixups = BUILTIN_FIXUPS

def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
set_as_current=True, accept_magic_kwargs=False,
tasks=None, broker=None, include=None, changes=None,
config_source=None, fixups=None, task_cls=None,
autofinalize=True, **kwargs):
self.clock = LamportClock()
self.main = main
self.amqp_cls = amqp or self.amqp_cls
self.events_cls = events or self.events_cls
self.loader_cls = loader or self.loader_cls
self.log_cls = log or self.log_cls
self.control_cls = control or self.control_cls
self.task_cls = task_cls or self.task_cls
self.set_as_current = set_as_current
self.registry_cls = symbol_by_name(self.registry_cls)
self.accept_magic_kwargs = accept_magic_kwargs
self.user_options = defaultdict(set)
self.steps = defaultdict(set)
self.autofinalize = autofinalize

self.configured = False
self._config_source = config_source
self._pending_defaults = deque()

self.finalized = False
self._finalize_mutex = threading.Lock()
self._pending = deque()
self._tasks = tasks
if not isinstance(self._tasks, TaskRegistry):
self._tasks = TaskRegistry(self._tasks or {})

# If the class defines a custom __reduce_args__ we need to use
# the old way of pickling apps, which is pickling a list of
# args instead of the new way that pickles a dict of keywords.
self._using_v1_reduce = app_has_custom(self, '__reduce_args__')

# these options are moved to the config to
# simplify pickling of the app object.
self._preconf = changes or {}
if broker:
self._preconf['BROKER_URL'] = broker
if backend:
self._preconf['CELERY_RESULT_BACKEND'] = backend
if include:
self._preconf['CELERY_IMPORTS'] = include

# - Apply fixups.
self.fixups = set(self.builtin_fixups) if fixups is None else fixups
# ...store fixup instances in _fixups to keep weakrefs alive.
self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups]

if self.set_as_current:
self.set_current()

self.on_init()
_register_app(self)

def set_current(self):
_set_current_app(self)

def set_default(self):
set_default_app(self)

def __enter__(self):
return self

def __exit__(self, *exc_info):
self.close()

def close(self):
self._maybe_close_pool()

def on_init(self):
"""Optional callback called at init."""
pass

def start(self, argv=None):
return instantiate(
'celery.bin.celery:CeleryCommand',
app=self).execute_from_commandline(argv)

def worker_main(self, argv=None):
return instantiate(
'celery.bin.worker:worker',
app=self).execute_from_commandline(argv)

def task(self, *args, **opts):
"""Creates new task class from any callable."""
if _EXECV and not opts.get('_force_evaluate'):
# When using execv the task in the original module will point to a
# different app, so doing things like 'add.request' will point to
# a different task instance. This makes sure it will always use
# the task instance from the current app.
# Really need a better solution for this :(
from . import shared_task
return shared_task(*args, _force_evaluate=True, **opts)

def inner_create_task_cls(shared=True, filter=None, **opts):
_filt = filter # stupid 2to3

def _create_task_cls(fun):
if shared:
def cons(app):
return app._task_from_fun(fun, **opts)
cons.__name__ = fun.__name__
connect_on_app_finalize(cons)
if self.accept_magic_kwargs: # compat mode
task = self._task_from_fun(fun, **opts)
if filter:
task = filter(task)
return task

if self.finalized or opts.get('_force_evaluate'):
ret = self._task_from_fun(fun, **opts)
else:
# return a proxy object that evaluates on first use
ret = PromiseProxy(self._task_from_fun, (fun, ), opts,
__doc__=fun.__doc__)
self._pending.append(ret)
if _filt:
return _filt(ret)
return ret

return _create_task_cls

if len(args) == 1:
if callable(args[0]):
return inner_create_task_cls(**opts)(*args)
raise TypeError('argument 1 to @task() must be a callable')
if args:
raise TypeError(
'@task() takes exactly 1 argument ({0} given)'.format(
sum([len(args), len(opts)])))
return inner_create_task_cls(**opts)

def _task_from_fun(self, fun, **options):
if not self.finalized and not self.autofinalize:
raise RuntimeError('Contract breach: app not finalized')
base = options.pop('base', None) or self.Task
bind = options.pop('bind', False)

T = type(fun.__name__, (base, ), dict({
'app': self,
'accept_magic_kwargs': False,
'run': fun if bind else staticmethod(fun),
'_decorated': True,
'__doc__': fun.__doc__,
'__module__': fun.__module__,
'__wrapped__': fun}, **options))()
task = self._tasks[T.name] # return global instance.
return task

def finalize(self, auto=False):
with self._finalize_mutex:
if not self.finalized:
if auto and not self.autofinalize:
raise RuntimeError('Contract breach: app not finalized')
self.finalized = True
_announce_app_finalized(self)

pending = self._pending
while pending:
maybe_evaluate(pending.popleft())

for task in values(self._tasks):
task.bind(self)

def add_defaults(self, fun):
if not callable(fun):
d, fun = fun, lambda: d
if self.configured:
return self.conf.add_defaults(fun())
self._pending_defaults.append(fun)

def config_from_object(self, obj, silent=False, force=False):
self._config_source = obj
if force or self.configured:
del(self.conf)
return self.loader.config_from_object(obj, silent=silent)

def config_from_envvar(self, variable_name, silent=False, force=False):
module_name = os.environ.get(variable_name)
if not module_name:
if silent:
return False
raise ImproperlyConfigured(
ERR_ENVVAR_NOT_SET.format(variable_name))
return self.config_from_object(module_name, silent=silent, force=force)

def config_from_cmdline(self, argv, namespace='celery'):
self.conf.update(self.loader.cmdline_config_parser(argv, namespace))

def setup_security(self, allowed_serializers=None, key=None, cert=None,
store=None, digest='sha1', serializer='json'):
from celery.security import setup_security
return setup_security(allowed_serializers, key, cert,
store, digest, serializer, app=self)

def autodiscover_tasks(self, packages, related_name='tasks', force=False):
if force:
return self._autodiscover_tasks(packages, related_name)
signals.import_modules.connect(promise(
self._autodiscover_tasks, (packages, related_name),
), weak=False, sender=self)

def _autodiscover_tasks(self, packages, related_name='tasks', **kwargs):
# argument may be lazy
packages = packages() if callable(packages) else packages
self.loader.autodiscover_tasks(packages, related_name)

def send_task(self, name, args=None, kwargs=None, countdown=None,
eta=None, task_id=None, producer=None, connection=None,
router=None, result_cls=None, expires=None,
publisher=None, link=None, link_error=None,
add_to_parent=True, reply_to=None, **options):
task_id = task_id or uuid()
producer = producer or publisher # XXX compat
router = router or self.amqp.router
conf = self.conf
if conf.CELERY_ALWAYS_EAGER: # pragma: no cover
warnings.warn(AlwaysEagerIgnored(
'CELERY_ALWAYS_EAGER has no effect on send_task',
), stacklevel=2)
options = router.route(options, name, args, kwargs)
if connection:
producer = self.amqp.TaskProducer(connection)
with self.producer_or_acquire(producer) as P:
self.backend.on_task_call(P, task_id)
task_id = P.publish_task(
name, args, kwargs, countdown=countdown, eta=eta,
task_id=task_id, expires=expires,
callbacks=maybe_list(link), errbacks=maybe_list(link_error),
reply_to=reply_to or self.oid, **options
)
result = (result_cls or self.AsyncResult)(task_id)
if add_to_parent:
parent = get_current_worker_task()
if parent:
parent.add_trail(result)
return result

def connection(self, hostname=None, userid=None, password=None,
virtual_host=None, port=None, ssl=None,
connect_timeout=None, transport=None,
transport_options=None, heartbeat=None,
login_method=None, failover_strategy=None, **kwargs):
conf = self.conf
return self.amqp.Connection(
hostname or conf.BROKER_URL,
userid or conf.BROKER_USER,
password or conf.BROKER_PASSWORD,
virtual_host or conf.BROKER_VHOST,
port or conf.BROKER_PORT,
transport=transport or conf.BROKER_TRANSPORT,
ssl=self.either('BROKER_USE_SSL', ssl),
heartbeat=heartbeat,
login_method=login_method or conf.BROKER_LOGIN_METHOD,
failover_strategy=(
failover_strategy or conf.BROKER_FAILOVER_STRATEGY
),
transport_options=dict(
conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {}
),
connect_timeout=self.either(
'BROKER_CONNECTION_TIMEOUT', connect_timeout
),
)
broker_connection = connection

def _acquire_connection(self, pool=True):
"""Helper for :meth:`connection_or_acquire`."""
if pool:
return self.pool.acquire(block=True)
return self.connection()

def connection_or_acquire(self, connection=None, pool=True, *_, **__):
return FallbackContext(connection, self._acquire_connection, pool=pool)
default_connection = connection_or_acquire # XXX compat

def producer_or_acquire(self, producer=None):
return FallbackContext(
producer, self.amqp.producer_pool.acquire, block=True,
)
default_producer = producer_or_acquire # XXX compat

def prepare_config(self, c):
"""Prepare configuration before it is merged with the defaults."""
return find_deprecated_settings(c)

def now(self):
return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC)

def mail_admins(self, subject, body, fail_silently=False):
if self.conf.ADMINS:
to = [admin_email for _, admin_email in self.conf.ADMINS]
return self.loader.mail_admins(
subject, body, fail_silently, to=to,
sender=self.conf.SERVER_EMAIL,
host=self.conf.EMAIL_HOST,
port=self.conf.EMAIL_PORT,
user=self.conf.EMAIL_HOST_USER,
password=self.conf.EMAIL_HOST_PASSWORD,
timeout=self.conf.EMAIL_TIMEOUT,
use_ssl=self.conf.EMAIL_USE_SSL,
use_tls=self.conf.EMAIL_USE_TLS,
)

def select_queues(self, queues=None):
return self.amqp.queues.select(queues)

def either(self, default_key, *values):
"""Fallback to the value of a configuration key if none of the
`*values` are true."""
return first(None, values) or self.conf.get(default_key)

def bugreport(self):
return bugreport(self)

def _get_backend(self):
from celery.backends import get_backend_by_url
backend, url = get_backend_by_url(
self.backend_cls or self.conf.CELERY_RESULT_BACKEND,
self.loader)
return backend(app=self, url=url)

def on_configure(self):
"""Callback calld when the app loads configuration"""
pass

def _get_config(self):
self.on_configure()
if self._config_source:
self.loader.config_from_object(self._config_source)
self.configured = True
s = Settings({}, [self.prepare_config(self.loader.conf),
deepcopy(DEFAULTS)])
# load lazy config dict initializers.
pending = self._pending_defaults
while pending:
s.add_defaults(maybe_evaluate(pending.popleft()()))

# preconf options must be explicitly set in the conf, and not
# as defaults or they will not be pickled with the app instance.
# This will cause errors when `CELERYD_FORCE_EXECV=True` as
# the workers will not have a BROKER_URL, CELERY_RESULT_BACKEND,
# or CELERY_IMPORTS set in the config.
if self._preconf:
s.update(self._preconf)
return s

def _after_fork(self, obj_):
self._maybe_close_pool()

def _maybe_close_pool(self):
pool, self._pool = self._pool, None
if pool is not None:
pool.force_close_all()
amqp = self.__dict__.get('amqp')
if amqp is not None:
producer_pool, amqp._producer_pool = amqp._producer_pool, None
if producer_pool is not None:
producer_pool.force_close_all()

def signature(self, *args, **kwargs):
kwargs['app'] = self
return self.canvas.signature(*args, **kwargs)

def create_task_cls(self):
"""Creates a base task class using default configuration
taken from this app."""
return self.subclass_with_self(
self.task_cls, name='Task', attribute='_app',
keep_reduce=True, abstract=True,
)

def subclass_with_self(self, Class, name=None, attribute='app',
reverse=None, keep_reduce=False, **kw):
"""Subclass an app-compatible class by setting its app attribute
to be this app instance.

App-compatible means that the class has a class attribute that
provides the default app it should use, e.g.
``class Foo: app = None``.

:param Class: The app-compatible class to subclass.
:keyword name: Custom name for the target class.
:keyword attribute: Name of the attribute holding the app,
default is 'app'.

"""
Class = symbol_by_name(Class)
reverse = reverse if reverse else Class.__name__

def __reduce__(self):
return _unpickle_appattr, (reverse, self.__reduce_args__())

attrs = dict({attribute: self}, __module__=Class.__module__,
__doc__=Class.__doc__, **kw)
if not keep_reduce:
attrs['__reduce__'] = __reduce__

return type(name or Class.__name__, (Class, ), attrs)

def _rgetattr(self, path):
return attrgetter(path)(self)

def __repr__(self):
return '<{0} {1}>'.format(type(self).__name__, appstr(self))

def __reduce__(self):
if self._using_v1_reduce:
return self.__reduce_v1__()
return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__()))

def __reduce_v1__(self):
# Reduce only pickles the configuration changes,
# so the default configuration doesn't have to be passed
# between processes.
return (
_unpickle_app,
(self.__class__, self.Pickler) + self.__reduce_args__(),
)

def __reduce_keys__(self):
"""Return keyword arguments used to reconstruct the object
when unpickling."""
return {
'main': self.main,
'changes': self.conf.changes if self.configured else self._preconf,
'loader': self.loader_cls,
'backend': self.backend_cls,
'amqp': self.amqp_cls,
'events': self.events_cls,
'log': self.log_cls,
'control': self.control_cls,
'accept_magic_kwargs': self.accept_magic_kwargs,
'fixups': self.fixups,
'config_source': self._config_source,
'task_cls': self.task_cls,
}

def __reduce_args__(self):
"""Deprecated method, please use :meth:`__reduce_keys__` instead."""
return (self.main, self.conf.changes,
self.loader_cls, self.backend_cls, self.amqp_cls,
self.events_cls, self.log_cls, self.control_cls,
self.accept_magic_kwargs, self._config_source)

@cached_property
def Worker(self):
return self.subclass_with_self('celery.apps.worker:Worker')

@cached_property
def WorkController(self, **kwargs):
return self.subclass_with_self('celery.worker:WorkController')

@cached_property
def Beat(self, **kwargs):
return self.subclass_with_self('celery.apps.beat:Beat')

@cached_property
def Task(self):
return self.create_task_cls()

@cached_property
def annotations(self):
return prepare_annotations(self.conf.CELERY_ANNOTATIONS)

@cached_property
def AsyncResult(self):
return self.subclass_with_self('celery.result:AsyncResult')

@cached_property
def ResultSet(self):
return self.subclass_with_self('celery.result:ResultSet')

@cached_property
def GroupResult(self):
return self.subclass_with_self('celery.result:GroupResult')

@cached_property
def TaskSet(self): # XXX compat
"""Deprecated! Please use :class:`celery.group` instead."""
return self.subclass_with_self('celery.task.sets:TaskSet')

@cached_property
def TaskSetResult(self): # XXX compat
"""Deprecated! Please use :attr:`GroupResult` instead."""
return self.subclass_with_self('celery.result:TaskSetResult')

@property
def pool(self):
if self._pool is None:
_ensure_after_fork()
limit = self.conf.BROKER_POOL_LIMIT
self._pool = self.connection().Pool(limit=limit)
return self._pool

@property
def current_task(self):
return _task_stack.top

@cached_property
def oid(self):
return oid_from(self)

@cached_property
def amqp(self):
return instantiate(self.amqp_cls, app=self)

@cached_property
def backend(self):
return self._get_backend()

@cached_property
def conf(self):
return self._get_config()

@cached_property
def control(self):
return instantiate(self.control_cls, app=self)

@cached_property
def events(self):
return instantiate(self.events_cls, app=self)

@cached_property
def loader(self):
return get_loader_cls(self.loader_cls)(app=self)

@cached_property
def log(self):
return instantiate(self.log_cls, app=self)

@cached_property
def canvas(self):
from celery import canvas
return canvas

@cached_property
def tasks(self):
self.finalize(auto=True)
return self._tasks

@cached_property
def timezone(self):
from celery.utils.timeutils import timezone
conf = self.conf
tz = conf.CELERY_TIMEZONE
if not tz:
return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC
else timezone.local)
return timezone.get_timezone(self.conf.CELERY_TIMEZONE)
App = Celery # compat

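A minimal sketch of the class above in use (the broker and backend URLs
are assumptions): broker= and backend= land in _preconf as BROKER_URL and
CELERY_RESULT_BACKEND, and @app.task registers the function against this
app instance.

from celery import Celery

app = Celery('mysite',
             broker='amqp://guest@localhost//',
             backend='amqp')

@app.task
def add(x, y):
    return x + y

# .delay() publishes the task message and returns an AsyncResult.
result = add.delay(2, 2)
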
+ 379 - 0 thesisenv/lib/python3.6/site-packages/celery/app/builtins.py

@@ -0,0 +1,379 @@
# -*- coding: utf-8 -*-
"""
celery.app.builtins
~~~~~~~~~~~~~~~~~~~

Built-in tasks that are always available in all
app instances. E.g. chord, group and xmap.

"""
from __future__ import absolute_import

from collections import deque

from celery._state import get_current_worker_task, connect_on_app_finalize
from celery.utils import uuid
from celery.utils.log import get_logger

__all__ = []

logger = get_logger(__name__)


@connect_on_app_finalize
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.

If the configured backend requires periodic cleanup this task is also
automatically configured to run every day at 4am (requires
:program:`celery beat` to be running).

"""
@app.task(name='celery.backend_cleanup',
shared=False, _force_evaluate=True)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup


@connect_on_app_finalize
def add_unlock_chord_task(app):
"""This task is used by result backends without native chord support.

It joins chords by creating a task chain polling the header for completion.

"""
from celery.canvas import signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple

default_propagate = app.conf.CELERY_CHORD_PROPAGATES

@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
default_retry_delay=1, ignore_result=True, _force_evaluate=True,
bind=True)
def unlock_chord(self, group_id, callback, interval=None, propagate=None,
max_retries=None, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple):
# if propagate is disabled exceptions raised by chord tasks
# will be sent as part of the result list to the chord callback.
# Since 3.1 propagate will be enabled by default, and instead
# the chord callback changes state to FAILURE with the
# exception set to ChordError.
propagate = default_propagate if propagate is None else propagate
if interval is None:
interval = self.default_retry_delay

# check if the task group is ready, and if so apply the callback.
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join

try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries,
)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)

callback = signature(callback, app=app)
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=propagate)
except Exception as exc:
try:
culprit = next(deps._failed_join_report())
reason = 'Dependency {0.id} raised {1!r}'.format(
culprit, exc,
)
except StopIteration:
reason = repr(exc)
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(callback,
ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc:
logger.error('Chord %r raised: %r', group_id, exc, exc_info=1)
app.backend.chord_error_from_stack(
callback,
exc=ChordError('Callback error: {0!r}'.format(exc)),
)
return unlock_chord


@connect_on_app_finalize
def add_map_task(app):
from celery.canvas import signature

@app.task(name='celery.map', shared=False, _force_evaluate=True)
def xmap(task, it):
task = signature(task, app=app).type
return [task(item) for item in it]
return xmap


@connect_on_app_finalize
def add_starmap_task(app):
from celery.canvas import signature

@app.task(name='celery.starmap', shared=False, _force_evaluate=True)
def xstarmap(task, it):
task = signature(task, app=app).type
return [task(*item) for item in it]
return xstarmap


@connect_on_app_finalize
def add_chunk_task(app):
from celery.canvas import chunks as _chunks

@app.task(name='celery.chunks', shared=False, _force_evaluate=True)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks


@connect_on_app_finalize
def add_group_task(app):
_app = app
from celery.canvas import maybe_signature, signature
from celery.result import result_from_tuple

class Group(app.Task):
app = _app
name = 'celery.group'
accept_magic_kwargs = False
_decorated = True

def run(self, tasks, result, group_id, partial_args,
add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
taskit = (signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
return app.GroupResult(
result.id,
[stask.apply(group_id=group_id) for stask in taskit],
)
with app.producer_or_acquire() as pub:
[stask.apply_async(group_id=group_id, producer=pub,
add_to_parent=False) for stask in taskit]
parent = get_current_worker_task()
if add_to_parent and parent:
parent.add_trail(result)
return result

def prepare(self, options, tasks, args, **kwargs):
options['group_id'] = group_id = (
options.setdefault('task_id', uuid()))

def prepare_member(task):
task = maybe_signature(task, app=self.app)
task.options['group_id'] = group_id
return task, task.freeze()

try:
tasks, res = list(zip(
*[prepare_member(task) for task in tasks]
))
except ValueError: # tasks empty
tasks, res = [], []
return (tasks, self.app.GroupResult(group_id, res), group_id, args)

def apply_async(self, partial_args=(), kwargs={}, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(partial_args, kwargs, **options)
tasks, result, gid, args = self.prepare(
options, args=partial_args, **kwargs
)
super(Group, self).apply_async((
list(tasks), result.as_tuple(), gid, args), **options
)
return result

def apply(self, args=(), kwargs={}, **options):
return super(Group, self).apply(
self.prepare(options, args=args, **kwargs),
**options).get()
return Group


@connect_on_app_finalize
def add_chain_task(app):
from celery.canvas import (
Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
)

_app = app

class Chain(app.Task):
app = _app
name = 'celery.chain'
accept_magic_kwargs = False
_decorated = True

def prepare_steps(self, args, tasks):
app = self.app
steps = deque(tasks)
next_step = prev_task = prev_res = None
tasks, results = [], []
i = 0
while steps:
# First task get partial args from chain.
task = maybe_signature(steps.popleft(), app=app)
task = task.clone() if i else task.clone(args)
res = task.freeze()
i += 1

if isinstance(task, group):
task = maybe_unroll_group(task)
if isinstance(task, chain):
# splice the chain
steps.extendleft(reversed(task.tasks))
continue

elif isinstance(task, group) and steps and \
not isinstance(steps[0], group):
# automatically upgrade group(..) | s to chord(group, s)
try:
next_step = steps.popleft()
# for chords we freeze by pretending it's a normal
# task instead of a group.
res = Signature.freeze(next_step)
task = chord(task, body=next_step, task_id=res.task_id)
except IndexError:
pass # no callback, so keep as group
if prev_task:
# link previous task to this task.
prev_task.link(task)
# set the results parent attribute.
if not res.parent:
res.parent = prev_res

if not isinstance(prev_task, chord):
results.append(res)
tasks.append(task)
prev_task, prev_res = task, res

return tasks, results

def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
task_id=None, link=None, link_error=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
options.pop('publisher', None)
tasks, results = self.prepare_steps(args, kwargs['tasks'])
result = results[-1]
if group_id:
tasks[-1].set(group_id=group_id)
if chord:
tasks[-1].set(chord=chord)
if task_id:
tasks[-1].set(task_id=task_id)
result = tasks[-1].type.AsyncResult(task_id)
# make sure we can do a link() and link_error() on a chain object.
if link:
tasks[-1].set(link=link)
# and if any task in the chain fails, call the errbacks
if link_error:
for task in tasks:
task.set(link_error=link_error)
tasks[0].apply_async(**options)
return result

def apply(self, args=(), kwargs={}, signature=maybe_signature,
**options):
app = self.app
last, fargs = None, args # fargs passed to first task only
for task in kwargs['tasks']:
res = signature(task, app=app).clone(fargs).apply(
last and (last.get(), ),
)
res.parent, last, fargs = last, res, None
return last
return Chain


@connect_on_app_finalize
def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a signature, and this generates the task
responsible for that."""
from celery import group
from celery.canvas import maybe_signature
_app = app
default_propagate = app.conf.CELERY_CHORD_PROPAGATES

class Chord(app.Task):
app = _app
name = 'celery.chord'
accept_magic_kwargs = False
ignore_result = False
_decorated = True

def run(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, propagate=None,
eager=False, **kwargs):
app = self.app
propagate = default_propagate if propagate is None else propagate
group_id = uuid()

# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
maybe_signature(s, app=app).clone() for s in tasks
], app=self.app)
# - eager applies the group inline
if eager:
return header.apply(args=partial_args, task_id=group_id)

body['chord_size'] = len(header.tasks)
results = header.freeze(group_id=group_id, chord=body).results

return self.backend.apply_chord(
header, partial_args, group_id,
body, interval=interval, countdown=countdown,
max_retries=max_retries, propagate=propagate, result=results,
)

def apply_async(self, args=(), kwargs={}, task_id=None,
group_id=None, chord=None, **options):
app = self.app
if app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
header = kwargs.pop('header')
body = kwargs.pop('body')
header, body = (maybe_signature(header, app=app),
maybe_signature(body, app=app))
# forward certain options to body
if chord is not None:
body.options['chord'] = chord
if group_id is not None:
body.options['group_id'] = group_id
[body.link(s) for s in options.pop('link', [])]
[body.link_error(s) for s in options.pop('link_error', [])]
body_result = body.freeze(task_id)
parent = super(Chord, self).apply_async((header, body, args),
kwargs, **options)
body_result.parent = parent
return body_result

def apply(self, args=(), kwargs={}, propagate=True, **options):
body = kwargs['body']
res = super(Chord, self).apply(args, dict(kwargs, eager=True),
**options)
return maybe_signature(body, app=self.app).apply(
args=(res.get(propagate=propagate).get(), ))
return Chord

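A usage sketch for the canvas builtins above (the broker URL and the Redis
result backend are assumptions; the tasks are defined inline for
illustration): chord runs the header group in parallel, then feeds the
list of results to the body task. On backends without native chord
support, the celery.chord_unlock task defined above does the polling.

from celery import Celery, chord, group

app = Celery('example',
             broker='amqp://guest@localhost//',
             backend='redis://localhost')

@app.task
def add(x, y):
    return x + y

@app.task
def tsum(numbers):
    return sum(numbers)

# Run ten add() tasks in parallel, then sum their results in tsum.
result = chord(group(add.s(i, i) for i in range(10)))(tsum.s())
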
+ 317 - 0 thesisenv/lib/python3.6/site-packages/celery/app/control.py

@@ -0,0 +1,317 @@
# -*- coding: utf-8 -*-
"""
celery.app.control
~~~~~~~~~~~~~~~~~~~

Client for worker remote control commands.
Server implementation is in :mod:`celery.worker.control`.

"""
from __future__ import absolute_import

import warnings

from kombu.pidbox import Mailbox
from kombu.utils import cached_property

from celery.exceptions import DuplicateNodenameWarning
from celery.utils.text import pluralize

__all__ = ['Inspect', 'Control', 'flatten_reply']

W_DUPNODE = """\
Received multiple replies from node {0}: {1}.
Please make sure you give each node a unique nodename using the `-n` option.\
"""


def flatten_reply(reply):
nodes, dupes = {}, set()
for item in reply:
[dupes.add(name) for name in item if name in nodes]
nodes.update(item)
if dupes:
warnings.warn(DuplicateNodenameWarning(
W_DUPNODE.format(
pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
),
))
return nodes


class Inspect(object):
app = None

def __init__(self, destination=None, timeout=1, callback=None,
connection=None, app=None, limit=None):
self.app = app or self.app
self.destination = destination
self.timeout = timeout
self.callback = callback
self.connection = connection
self.limit = limit

def _prepare(self, reply):
if not reply:
return
by_node = flatten_reply(reply)
if self.destination and \
not isinstance(self.destination, (list, tuple)):
return by_node.get(self.destination)
return by_node

def _request(self, command, **kwargs):
return self._prepare(self.app.control.broadcast(
command,
arguments=kwargs,
destination=self.destination,
callback=self.callback,
connection=self.connection,
limit=self.limit,
timeout=self.timeout, reply=True,
))

def report(self):
return self._request('report')

def clock(self):
return self._request('clock')

def active(self, safe=False):
return self._request('dump_active', safe=safe)

def scheduled(self, safe=False):
return self._request('dump_schedule', safe=safe)

def reserved(self, safe=False):
return self._request('dump_reserved', safe=safe)

def stats(self):
return self._request('stats')

def revoked(self):
return self._request('dump_revoked')

def registered(self, *taskinfoitems):
return self._request('dump_tasks', taskinfoitems=taskinfoitems)
registered_tasks = registered

def ping(self):
return self._request('ping')

def active_queues(self):
return self._request('active_queues')

def query_task(self, ids):
return self._request('query_task', ids=ids)

def conf(self, with_defaults=False):
return self._request('dump_conf', with_defaults=with_defaults)

def hello(self, from_node, revoked=None):
return self._request('hello', from_node=from_node, revoked=revoked)

def memsample(self):
return self._request('memsample')

def memdump(self, samples=10):
return self._request('memdump', samples=samples)

def objgraph(self, type='Request', n=200, max_depth=10):
return self._request('objgraph', num=n, max_depth=max_depth, type=type)


class Control(object):
Mailbox = Mailbox

def __init__(self, app=None):
self.app = app
self.mailbox = self.Mailbox('celery', type='fanout', accept=['json'])

@cached_property
def inspect(self):
return self.app.subclass_with_self(Inspect, reverse='control.inspect')

def purge(self, connection=None):
"""Discard all waiting tasks.

This will ignore all tasks waiting for execution, and they will
be deleted from the messaging server.

:returns: the number of tasks discarded.

"""
with self.app.connection_or_acquire(connection) as conn:
return self.app.amqp.TaskConsumer(conn).purge()
discard_all = purge

def election(self, id, topic, action=None, connection=None):
self.broadcast('election', connection=connection, arguments={
'id': id, 'topic': topic, 'action': action,
})

def revoke(self, task_id, destination=None, terminate=False,
signal='SIGTERM', **kwargs):
"""Tell all (or specific) workers to revoke a task by id.

If a task is revoked, the workers will ignore the task and
not execute it after all.

:param task_id: Id of the task to revoke.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('revoke', destination=destination,
arguments={'task_id': task_id,
'terminate': terminate,
'signal': signal}, **kwargs)

def ping(self, destination=None, timeout=1, **kwargs):
"""Ping all (or specific) workers.

Will return the list of answers.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('ping', reply=True, destination=destination,
timeout=timeout, **kwargs)

def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
"""Tell all (or specific) workers to set a new rate limit
for task by type.

:param task_name: Name of task to change rate limit for.
:param rate_limit: The rate limit as tasks per second, or a rate limit
string (`'100/m'`, etc.
see :attr:`celery.task.base.Task.rate_limit` for
more information).

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast('rate_limit', destination=destination,
arguments={'task_name': task_name,
'rate_limit': rate_limit},
**kwargs)

def add_consumer(self, queue, exchange=None, exchange_type='direct',
routing_key=None, options=None, **kwargs):
"""Tell all (or specific) workers to start consuming from a new queue.

Only the queue name is required: if only the queue is specified,
the exchange and routing key will be set to the same name
(like automatic queues do).

.. note::

This command does not respect the default queue/exchange
options in the configuration.

:param queue: Name of queue to start consuming from.
:keyword exchange: Optional name of exchange.
:keyword exchange_type: Type of exchange (defaults to 'direct').
:keyword routing_key: Optional routing key.
:keyword options: Additional options as supported
by :meth:`kombu.entitiy.Queue.from_dict`.

See :meth:`broadcast` for supported keyword arguments.

"""
return self.broadcast(
'add_consumer',
arguments=dict({'queue': queue, 'exchange': exchange,
'exchange_type': exchange_type,
'routing_key': routing_key}, **options or {}),
**kwargs
)

def cancel_consumer(self, queue, **kwargs):
"""Tell all (or specific) workers to stop consuming from ``queue``.

Supports the same keyword arguments as :meth:`broadcast`.

"""
return self.broadcast(
'cancel_consumer', arguments={'queue': queue}, **kwargs
)

def time_limit(self, task_name, soft=None, hard=None, **kwargs):
"""Tell all (or specific) workers to set time limits for
a task by type.

:param task_name: Name of task to change time limits for.
:keyword soft: New soft time limit (in seconds).
:keyword hard: New hard time limit (in seconds).

Any additional keyword arguments are passed on to :meth:`broadcast`.

"""
return self.broadcast(
'time_limit',
arguments={'task_name': task_name,
'hard': hard, 'soft': soft}, **kwargs)

def enable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to enable events."""
return self.broadcast('enable_events', {}, destination, **kwargs)

def disable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to disable events."""
return self.broadcast('disable_events', {}, destination, **kwargs)

def pool_grow(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to grow the pool by ``n``.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast('pool_grow', {'n': n}, destination, **kwargs)

def pool_shrink(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to shrink the pool by ``n``.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs)

def autoscale(self, max, min, destination=None, **kwargs):
"""Change worker(s) autoscale setting.

Supports the same arguments as :meth:`broadcast`.

"""
return self.broadcast(
'autoscale', {'max': max, 'min': min}, destination, **kwargs)

def broadcast(self, command, arguments=None, destination=None,
connection=None, reply=False, timeout=1, limit=None,
callback=None, channel=None, **extra_kwargs):
"""Broadcast a control command to the celery workers.

:param command: Name of command to send.
:param arguments: Keyword arguments for the command.
:keyword destination: If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
:keyword connection: Custom broker connection to use, if not set,
a connection will be established automatically.
:keyword reply: Wait for and return the reply.
:keyword timeout: Timeout in seconds to wait for the reply.
:keyword limit: Limit number of replies.
:keyword callback: Callback called immediately for each reply
received.

"""
with self.app.connection_or_acquire(connection) as conn:
arguments = dict(arguments or {}, **extra_kwargs)
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
)

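A hedged sketch of the remote-control client above (the worker name in the
reply is illustrative; app is assumed to be a configured Celery instance
with workers running): Control methods broadcast over the pidbox mailbox,
and inspect() wraps the dump_* commands.

# Ping all workers and wait briefly for replies.
replies = app.control.ping(timeout=0.5)
# e.g. [{'worker1@host': {'ok': 'pong'}}]

# Change a task's rate limit on every worker at runtime
# (the task name here is hypothetical).
app.control.rate_limit('application.tasks.send_newsletter', '10/m')

# Query worker state.
i = app.control.inspect()
print(i.active())      # tasks currently executing, per worker
print(i.registered())  # task names each worker knows about
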
+ 274 - 0 thesisenv/lib/python3.6/site-packages/celery/app/defaults.py

@@ -0,0 +1,274 @@
# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~

Configuration introspection and defaults.

"""
from __future__ import absolute_import

import sys

from collections import deque, namedtuple
from datetime import timedelta

from celery.five import items
from celery.utils import strtobool
from celery.utils.functional import memoize

__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']

is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')

DEFAULT_POOL = 'prefork'
if is_jython:
DEFAULT_POOL = 'threads'
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = 'solo'
else:
DEFAULT_POOL = 'prefork'

DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml']
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""

_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'BROKER_URL setting'}
_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'URL form of CELERY_RESULT_BACKEND'}

searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))


# logging: processName first introduced in Py 2.6.2 (Issue #1644).
if sys.version_info < (2, 6, 2):
DEFAULT_PROCESS_LOG_FMT = DEFAULT_LOG_FMT


class Option(object):
alt = None
deprecate_by = None
remove_by = None
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
bool=strtobool, dict=dict, tuple=tuple)

def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get('type') or 'string'
for attr, value in items(kwargs):
setattr(self, attr, value)

def to_python(self, value):
return self.typemap[self.type](value)

def __repr__(self):
return '<Option: type->{0} default->{1!r}>'.format(self.type,
self.default)

NAMESPACES = {
'BROKER': {
'URL': Option(None, type='string'),
'CONNECTION_TIMEOUT': Option(4, type='float'),
'CONNECTION_RETRY': Option(True, type='bool'),
'CONNECTION_MAX_RETRIES': Option(100, type='int'),
'FAILOVER_STRATEGY': Option(None, type='string'),
'HEARTBEAT': Option(None, type='int'),
'HEARTBEAT_CHECKRATE': Option(3.0, type='int'),
'LOGIN_METHOD': Option(None, type='string'),
'POOL_LIMIT': Option(10, type='int'),
'USE_SSL': Option(False, type='bool'),
'TRANSPORT': Option(type='string'),
'TRANSPORT_OPTIONS': Option({}, type='dict'),
'HOST': Option(type='string', **_BROKER_OLD),
'PORT': Option(type='int', **_BROKER_OLD),
'USER': Option(type='string', **_BROKER_OLD),
'PASSWORD': Option(type='string', **_BROKER_OLD),
'VHOST': Option(type='string', **_BROKER_OLD),
},
'CASSANDRA': {
'COLUMN_FAMILY': Option(type='string'),
'DETAILED_MODE': Option(False, type='bool'),
'KEYSPACE': Option(type='string'),
'READ_CONSISTENCY': Option(type='string'),
'SERVERS': Option(type='list'),
'WRITE_CONSISTENCY': Option(type='string'),
},
'CELERY': {
'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'),
'ACKS_LATE': Option(False, type='bool'),
'ALWAYS_EAGER': Option(False, type='bool'),
'ANNOTATIONS': Option(type='any'),
'BROADCAST_QUEUE': Option('celeryctl'),
'BROADCAST_EXCHANGE': Option('celeryctl'),
'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
'CACHE_BACKEND': Option(),
'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
'CHORD_PROPAGATES': Option(True, type='bool'),
'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'),
'CREATE_MISSING_QUEUES': Option(True, type='bool'),
'DEFAULT_RATE_LIMIT': Option(type='string'),
'DISABLE_RATE_LIMITS': Option(False, type='bool'),
'DEFAULT_ROUTING_KEY': Option('celery'),
'DEFAULT_QUEUE': Option('celery'),
'DEFAULT_EXCHANGE': Option('celery'),
'DEFAULT_EXCHANGE_TYPE': Option('direct'),
'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
'ENABLE_UTC': Option(True, type='bool'),
'ENABLE_REMOTE_CONTROL': Option(True, type='bool'),
'EVENT_SERIALIZER': Option('json'),
'EVENT_QUEUE_EXPIRES': Option(None, type='float'),
'EVENT_QUEUE_TTL': Option(None, type='float'),
'IMPORTS': Option((), type='tuple'),
'INCLUDE': Option((), type='tuple'),
'IGNORE_RESULT': Option(False, type='bool'),
'MAX_CACHED_RESULTS': Option(100, type='int'),
'MESSAGE_COMPRESSION': Option(type='string'),
'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
'REDIS_HOST': Option(type='string', **_REDIS_OLD),
'REDIS_PORT': Option(type='int', **_REDIS_OLD),
'REDIS_DB': Option(type='int', **_REDIS_OLD),
'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
'REDIS_MAX_CONNECTIONS': Option(type='int'),
'RESULT_BACKEND': Option(type='string'),
'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
'RESULT_DB_TABLENAMES': Option(type='dict'),
'RESULT_DBURI': Option(),
'RESULT_ENGINE_OPTIONS': Option(type='dict'),
'RESULT_EXCHANGE': Option('celeryresults'),
'RESULT_EXCHANGE_TYPE': Option('direct'),
'RESULT_SERIALIZER': Option('pickle'),
'RESULT_PERSISTENT': Option(None, type='bool'),
'ROUTES': Option(type='any'),
'SEND_EVENTS': Option(False, type='bool'),
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
'TASK_PUBLISH_RETRY_POLICY': Option({
'max_retries': 3,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}, type='dict'),
'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
'TASK_SERIALIZER': Option('pickle'),
'TIMEZONE': Option(type='string'),
'TRACK_STARTED': Option(False, type='bool'),
'REDIRECT_STDOUTS': Option(True, type='bool'),
'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
'QUEUES': Option(type='dict'),
'QUEUE_HA_POLICY': Option(None, type='string'),
'SECURITY_KEY': Option(type='string'),
'SECURITY_CERTIFICATE': Option(type='string'),
'SECURITY_CERT_STORE': Option(type='string'),
'WORKER_DIRECT': Option(False, type='bool'),
},
'CELERYD': {
'AGENT': Option(None, type='string'),
'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'),
'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'),
'CONCURRENCY': Option(0, type='int'),
'TIMER': Option(type='string'),
'TIMER_PRECISION': Option(1.0, type='float'),
'FORCE_EXECV': Option(False, type='bool'),
'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'),
'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
'LOG_COLOR': Option(type='bool'),
'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'MAX_TASKS_PER_CHILD': Option(type='int'),
'POOL': Option(DEFAULT_POOL),
'POOL_PUTLOCKS': Option(True, type='bool'),
'POOL_RESTARTS': Option(False, type='bool'),
'PREFETCH_MULTIPLIER': Option(4, type='int'),
'STATE_DB': Option(),
'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
'TASK_SOFT_TIME_LIMIT': Option(type='float'),
'TASK_TIME_LIMIT': Option(type='float'),
'WORKER_LOST_WAIT': Option(10.0, type='float')
},
'CELERYBEAT': {
'SCHEDULE': Option({}, type='dict'),
'SCHEDULER': Option('celery.beat:PersistentScheduler'),
'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
'SYNC_EVERY': Option(0, type='int'),
'MAX_LOOP_INTERVAL': Option(0, type='float'),
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
},
'CELERYMON': {
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
},
'EMAIL': {
'HOST': Option('localhost'),
'PORT': Option(25, type='int'),
'HOST_USER': Option(),
'HOST_PASSWORD': Option(),
'TIMEOUT': Option(2, type='float'),
'USE_SSL': Option(False, type='bool'),
'USE_TLS': Option(False, type='bool'),
},
'SERVER_EMAIL': Option('celery@localhost'),
'ADMINS': Option((), type='tuple'),
}
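
# Flat setting names map onto this tree by joining namespace and key with
# an underscore: BROKER_POOL_LIMIT -> NAMESPACES['BROKER']['POOL_LIMIT'],
# CELERYD_CONCURRENCY -> NAMESPACES['CELERYD']['CONCURRENCY'].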


def flatten(d, ns=''):
stack = deque([(ns, d)])
while stack:
name, space = stack.popleft()
for key, value in items(space):
if isinstance(value, dict):
stack.append((name + key + '_', value))
else:
yield name + key, value
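# For example, dict(flatten({'BROKER': {'URL': None}, 'ADMINS': ()})) gives
# {'ADMINS': (), 'BROKER_URL': None}; DEFAULTS below applies the same walk
# to collect every Option's default under its flat name.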
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))


def find_deprecated_settings(source):
from celery.utils import warn_deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
warn_deprecated(description='The {0!r} setting'.format(name),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative='Use the {0.alt} instead'.format(opt))
return source
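
# For example, loading a config object that sets CELERYD_LOG_LEVEL emits a
# warning that the setting is deprecated since 2.4, will be removed in 4.0,
# and that the --loglevel argument should be used instead.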


@memoize(maxsize=None)
def find(name, namespace='celery'):
# - Try specified namespace first.
namespace = namespace.upper()
try:
return searchresult(
namespace, name.upper(), NAMESPACES[namespace][name.upper()],
)
except KeyError:
# - Try all the other namespaces.
for ns, keys in items(NAMESPACES):
if ns.upper() == name.upper():
return searchresult(None, ns, keys)
elif isinstance(keys, dict):
try:
return searchresult(ns, name.upper(), keys[name.upper()])
except KeyError:
pass
# - See if name is a qualname last.
return searchresult(None, name.upper(), DEFAULTS[name.upper()])
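
A quick doctest-style sketch of the lookup order implemented above: the
specified namespace is tried first, then every other namespace, and finally
the flat DEFAULTS table:

    >>> from celery.app.defaults import find
    >>> find('pool_limit')          # misses CELERY, found under BROKER
    searchresult(namespace='BROKER', key='POOL_LIMIT', type=<Option: type->int default->10>)
    >>> find('always_eager').namespace
    'CELERY'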

+ 0
- 0
thesisenv/lib/python3.6/site-packages/celery/app/log.py


Some files were not shown because too many files changed in this diff
