From 4046784e15cddecabab958a8f50c35f2697db366 Mon Sep 17 00:00:00 2001
From: Esther Kleinhenz
Date: Mon, 29 Oct 2018 11:30:21 +0100
Subject: [PATCH] added post-office addon

Replace the Celery-based scheduled-report mailing (celeryapp.py,
email_service.py, tasks.py and the ScheduledReport models, forms and
admin) with the django-post-office add-on; mail is now sent through
post_office.mail.send. The vendored virtualenv swaps celery for
django_common, django_cron, post_office, jsonfield and uWSGI.
---
 application/__init__.py      |  2 -
 application/admin.py         | 45 +-
 application/celeryapp.py     | 13 -
 application/email_service.py | 57 -
 application/forms.py         | 25 -
 application/models.py        | 36 -
 application/tasks.py         |  7 -
 application/views.py         |  9 +
 mysite/settings.py           | 20 +-
 mysite/wsgi.py               |  1 -
 send_mail.log                | 27 +
 [ ~370 vendored files under thesisenv/lib/python3.6/site-packages/
   omitted from this listing: celery 3.1.26.post2 is deleted, and
   django_common (django-common-helpers 0.9.2), django_cron 0.5.1,
   post_office (django-post-office 3.1.0), jsonfield 2.0.2 and
   uWSGI 2.0.17.1 (uwsgidecorators.py) are added ]
 384 files changed, 11771 insertions(+), 56980 deletions(-)
 delete mode 100644 application/celeryapp.py
 delete mode 100644 application/email_service.py
 delete mode 100644 application/tasks.py
 create mode 100644 send_mail.log
diff --git a/application/__init__.py b/application/__init__.py
index 1bf50a5..06ebbce 100644
--- a/application/__init__.py
+++ b/application/__init__.py
@@ -1,5 +1,3 @@
 from __future__ import absolute_import, unicode_literals
 # This will make sure celery is always imported when
 # Django starts so that shared_task will use this app.
-from .celeryapp import app as celery_app
-__all__ = ['celery_app']
\ No newline at end of file
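With the Celery bootstrap removed from application/__init__.py, django-post-office is enabled through settings instead. The mysite/settings.py hunk at the end of this patch is truncated, so its exact lines are not shown here; as a sketch, the minimal configuration documented by the django-post-office README (an assumption, not a quote from this commit) is:

    # mysite/settings.py -- assumed minimal django-post-office setup;
    # the actual settings hunk in this patch is truncated
    INSTALLED_APPS = [
        # ...existing apps...
        'post_office',
    ]

    # Route outgoing mail through post_office's database-backed queue.
    EMAIL_BACKEND = 'post_office.EmailBackend'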
diff --git a/application/admin.py b/application/admin.py
index 02ff7b2..b207f53 100644
--- a/application/admin.py
+++ b/application/admin.py
@@ -4,9 +4,6 @@ from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
 from django.contrib.auth.models import User
 from .models import Post, CustomUser
-from .models import ScheduledReport, ReportRecipient, ScheduledReportGroup
-from .forms import ScheduledReportForm
-
 class CustomUserInline(admin.StackedInline):
     model = CustomUser
@@ -21,44 +18,4 @@ class UserAdmin(BaseUserAdmin):
 admin.site.unregister(User)
 admin.site.register(User, UserAdmin)
-admin.site.register(Post)
-
-
-class ReportRecipientAdmin(admin.TabularInline):
-    model = ReportRecipient
-class ScheduledReportAdmin(admin.ModelAdmin):
-    """
-    List display for Scheduled reports in Django admin
-    """
-    model = ScheduledReport
-    list_display = ('id', 'get_recipients')
-    inlines = [
-        ReportRecipientAdmin
-    ]
-    form = ScheduledReportForm
-    def get_recipients(self, model):
-        recipients = model.reportrecep.all().values_list('email', flat=True)
-        if not recipients:
-            return 'No recipients added'
-        recipient_list = ''
-        for recipient in recipients:
-            recipient_list = recipient_list + recipient + ', '
-        return recipient_list[:-2]
-    get_recipients.short_description = 'Recipients'
-    get_recipients.allow_tags = True
-class ScheduledReportGroupAdmin(admin.ModelAdmin):
-    """
-    List display for ScheduledReportGroup Admin
-    """
-    model = ScheduledReportGroup
-    list_display = ('get_scheduled_report_name','get_report_name')
-    def get_scheduled_report_name(self, model):
-        return model.scheduled_report.subject
-    def get_report_name(self, model):
-        return model.report.name
-    get_scheduled_report_name.short_description = "Scheduled Report Name"
-    get_report_name.short_description = "Report Name"
-    show_change_link = True
-    get_report_name.allow_tags = True
-admin.site.register(ScheduledReport, ScheduledReportAdmin)
-admin.site.register(ScheduledReportGroup, ScheduledReportGroupAdmin)
\ No newline at end of file
+admin.site.register(Post)
\ No newline at end of file
diff --git a/application/celeryapp.py b/application/celeryapp.py
deleted file mode 100644
index 7848051..0000000
--- a/application/celeryapp.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import absolute_import
-import os
-from celery import Celery
-# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')
-from django.conf import settings
-
-app = Celery('application')
-# Using a string here means the worker don't have to serialize
-# the configuration object to child processes.
-app.config_from_object('django.conf:settings')
-# Load task modules from all registered Django app configs.
-app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
\ No newline at end of file
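The deleted celeryapp.py above (together with tasks.py, deleted further below) drove scheduled mail through a Celery worker. The diffstat swaps Celery for django_cron plus django-post-office, whose send_queued_mail management command delivers queued messages, so the periodic trigger can become a cron job instead. A minimal sketch, assuming a hypothetical application/cron.py and class name:

    # application/cron.py -- hypothetical django_cron replacement for the
    # deleted periodic Celery task; flushes django-post-office's queue.
    from django.core.management import call_command
    from django_cron import CronJobBase, Schedule

    class SendQueuedMailCronJob(CronJobBase):
        RUN_EVERY_MINS = 1  # the deleted Celery task also ran every minute
        schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
        code = 'application.send_queued_mail_cron'  # unique job identifier

        def do(self):
            # post_office's documented command; sends all queued mail
            call_command('send_queued_mail')

The class would then be listed in settings.CRON_CLASSES and driven by django_cron's documented manage.py runcrons entry point.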
diff --git a/application/email_service.py b/application/email_service.py
deleted file mode 100644
index 574dede..0000000
--- a/application/email_service.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from datetime import datetime, timedelta
-from django.core.mail import send_mail
-from django.template import Template, Context
-from django.http import HttpResponse
-from django.conf import settings
-from .models import ScheduledReport, ScheduledReportGroup, ReportRecipient
-class ScheduledReportConfig(object):
-    def __init__(self, scheduled_report):
-        """
-        Expects a scheduled report object and inititializes
-        its own scheduled_report attribute with it
-        """
-        self.scheduled_report = scheduled_report
-    def get_report_config(self):
-        """
-        Returns the configuration related to a scheduled report, needed
-        to populate the email
-        """
-        return {
-            "template_context": self._get_related_reports_data(),
-            "recipients": self._get_report_recipients()
-        }
-    def _get_related_reports_data(self):
-        """
-        Returns the list of reports data which needs to be sent out in a scheduled report
-        """
-        pass
-    def _get_report_recipients(self):
-        """
-        Returns the recipient list for a scheduled report
-        """
-        pass
-def create_email_data(content=None):
-    content = '''
-
-
-    ''' + str(content) + ''''''
-    return content
-def send_emails():
-    current_time = datetime.utcnow()
-    scheduled_reports = ScheduledReport.objects.filter(next_run_at__lt = current_time)
-    for scheduled_report in scheduled_reports:
-        report_config = ScheduledReportConfig(scheduled_report).get_report_config()
-        """ Specify the template path you want to send out in the email. """
-        template = Template(create_email_data('path/to/your/email_template.html'))
-        """ //Create your email html using Django's context processor """
-        report_template = template.render(Context(report_config['template_context']))
-        scheduled_report.save()
-        if not scheduled_report.subject:
-            """ Handle exception for subject not provided """
-        if not report_config['recipients']:
-            """ Handle exception for recipients not provided """
-        send_mail(
-            scheduled_report.subject, 'Here is the message.',
-            settings.EMAIL_HOST_USER, report_config['recipients'],
-            fail_silently=False, html_message=report_template
-        )
\ No newline at end of file
diff --git a/application/forms.py b/application/forms.py
index 188d7ce..724d9c5 100644
--- a/application/forms.py
+++ b/application/forms.py
@@ -9,7 +9,6 @@ from django.contrib.auth.forms import UserCreationForm, UserChangeForm
 from datetime import datetime
 from croniter import croniter
 from django.forms import ModelForm, ValidationError
-from .models import ScheduledReport
 class PostForm(forms.ModelForm):
     class Meta:
@@ -21,27 +20,3 @@ class NewTagForm(forms.ModelForm):
     class Meta:
         model = CustomUser
         fields = ['m_tags']
-
-
-class ScheduledReportForm(ModelForm):
-    class Meta:
-        model = ScheduledReport
-        fields = ['subject', 'cron_expression']
-        fields = ['subject', 'cron_expression']
-        help_texts = {'cron_expression': 'Scheduled time is considered in UTC'}
-    def clean(self):
-        cleaned_data = super(ScheduledReportForm, self).clean()
-        cron_expression = cleaned_data.get("cron_expression")
-        try:
-            iter = croniter(cron_expression, datetime.now())
-        except:
-            raise ValidationError("Incorrect cron expression:\
-                The information you must include is (in order of appearance):\
-                A number (or list of numbers, or range of numbers), m, representing the minute of the hour\
-                A number (or list of numbers, or range of numbers), h, representing the hour of the day\
-                A number (or list of numbers, or range of numbers), dom, representing the day of the month\
-                A number (or list, or range), or name (or list of names), mon, representing the month of the year\
-                A number (or list, or range), or name (or list of names), dow, representing the day of the week\
-                The asterisks (*) in our entry tell cron that for that unit of time, the job should be run every.\
-                Eg. */5 * * * * cron for executing every 5 mins")
-        return cleaned_data
\ No newline at end of file
diff --git a/application/models.py b/application/models.py
index 1f1738a..9da3035 100644
--- a/application/models.py
+++ b/application/models.py
@@ -27,39 +27,3 @@ class Post(models.Model):

     def __str__(self):
         return self.title
-
-class Report(models.Model):
-    report_text = models.TextField()
-
-class ScheduledReport(models.Model):
-    """
-    Contains email subject and cron expression,to evaluate when the email has to be sent
-    """
-    subject = models.CharField(max_length=200)
-    last_run_at = models.DateTimeField(null=True, blank=True)
-    next_run_at = models.DateTimeField(null=True, blank=True)
-    cron_expression = models.CharField(max_length=200)
-    def save(self, *args, **kwargs):
-        """
-        function to evaluate "next_run_at" using the cron expression, so that it is updated once the report is sent.
-        """
-        self.last_run_at = datetime.now()
-        iter = croniter(self.cron_expression, self.last_run_at)
-        self.next_run_at = iter.get_next(datetime)
-        super(ScheduledReport, self).save(*args, **kwargs)
-    def __unicode__(self):
-        return self.subject
-
-class ScheduledReportGroup(models.Model):
-    """
-    Many to many mapping between reports which will be sent out in a scheduled report
-    """
-    report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE)
-    scheduled_report = models.ForeignKey(ScheduledReport,
-        related_name='relatedscheduledreport', on_delete=models.CASCADE)
-class ReportRecipient(models.Model):
-    """
-    Stores all the recipients of the given scheduled report
-    """
-    email = models.EmailField()
-    scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE)
\ No newline at end of file
diff --git a/application/tasks.py b/application/tasks.py
deleted file mode 100644
index ccc572f..0000000
--- a/application/tasks.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from celery.task.schedules import crontab
-from celery.decorators import periodic_task
-from .email_service import send_emails
-# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab
-@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*"))
-def trigger_emails():
-    send_emails()
\ No newline at end of file
diff --git a/application/views.py b/application/views.py
index 7c6de77..251d577 100644
--- a/application/views.py
+++ b/application/views.py
@@ -14,6 +14,7 @@ import collections
 from taggit_templatetags2.views import TagCanvasListView
 from django.contrib.auth.models import User
 from django.contrib import messages
+from post_office import mail
@@ -192,3 +193,11 @@ def blog_search_list_view(request):

 def tag_cloud(request):
     return render(request, 'tag_cloud.html', {})
+
+mail.send(
+    'esther.kleinhenz@web.de',  # List of email addresses also accepted
+    'esther.kleinhenz@web.de',
+    subject='My email',
+    message='Hi there!',
+    html_message='Hi there!',
+)
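Note that the mail.send(...) call added to views.py above sits at module level, so it executes once when Django imports the module rather than on each request. A sketch of the same post_office API wrapped in a view (hypothetical view name; the recipient, sender, and texts are taken from the patch):

    # application/views.py -- hypothetical per-request variant of the
    # module-level mail.send call introduced by this patch
    from django.http import HttpResponse
    from post_office import mail

    def send_test_mail(request):
        mail.send(
            'esther.kleinhenz@web.de',  # recipients; a list is also accepted
            'esther.kleinhenz@web.de',  # sender
            subject='My email',
            message='Hi there!',
            html_message='<p>Hi there!</p>',
            priority='now',  # deliver immediately instead of queueing
        )
        return HttpResponse('mail sent')

Without priority='now' the message is only stored in the queue, and delivery depends on send_queued_mail being run (see the cron sketch above).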
diff --git a/application/forms.py b/application/forms.py
index 188d7ce..724d9c5 100644
--- a/application/forms.py
+++ b/application/forms.py
@@ -9,7 +9,6 @@
 from django.contrib.auth.forms import UserCreationForm, UserChangeForm
 from datetime import datetime
 from croniter import croniter
 from django.forms import ModelForm, ValidationError
-from .models import ScheduledReport
 class PostForm(forms.ModelForm):
     class Meta:
@@ -21,27 +20,3 @@ class NewTagForm(forms.ModelForm):
     class Meta:
         model = CustomUser
         fields = ['m_tags']
-
-
-class ScheduledReportForm(ModelForm):
-    class Meta:
-        model = ScheduledReport
-        fields = ['subject', 'cron_expression']
-        fields = ['subject', 'cron_expression']
-        help_texts = {'cron_expression': 'Scheduled time is considered in UTC'}
-    def clean(self):
-        cleaned_data = super(ScheduledReportForm, self).clean()
-        cron_expression = cleaned_data.get("cron_expression")
-        try:
-            iter = croniter(cron_expression, datetime.now())
-        except:
-            raise ValidationError("Incorrect cron expression:\
-            The information you must include is (in order of appearance):\
-            A number (or list of numbers, or range of numbers), m, representing the minute of the hour\
-            A number (or list of numbers, or range of numbers), h, representing the hour of the day\
-            A number (or list of numbers, or range of numbers), dom, representing the day of the month\
-            A number (or list, or range), or name (or list of names), mon, representing the month of the year\
-            A number (or list, or range), or name (or list of names), dow, representing the day of the week\
-            The asterisks (*) in our entry tell cron that for that unit of time, the job should be run every.\
-            Eg. */5 * * * * cron for executing every 5 mins")
-        return cleaned_data
\ No newline at end of file
diff --git a/application/models.py b/application/models.py
index 1f1738a..9da3035 100644
--- a/application/models.py
+++ b/application/models.py
@@ -27,39 +27,3 @@ class Post(models.Model):
     def __str__(self):
         return self.title
-
-class Report(models.Model):
-    report_text = models.TextField()
-
-class ScheduledReport(models.Model):
-    """
-    Contains email subject and cron expression, to evaluate when the email has to be sent
-    """
-    subject = models.CharField(max_length=200)
-    last_run_at = models.DateTimeField(null=True, blank=True)
-    next_run_at = models.DateTimeField(null=True, blank=True)
-    cron_expression = models.CharField(max_length=200)
-    def save(self, *args, **kwargs):
-        """
-        function to evaluate "next_run_at" using the cron expression, so that it is updated once the report is sent.
-        """
-        self.last_run_at = datetime.now()
-        iter = croniter(self.cron_expression, self.last_run_at)
-        self.next_run_at = iter.get_next(datetime)
-        super(ScheduledReport, self).save(*args, **kwargs)
-    def __unicode__(self):
-        return self.subject
-
-class ScheduledReportGroup(models.Model):
-    """
-    Many to many mapping between reports which will be sent out in a scheduled report
-    """
-    report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE)
-    scheduled_report = models.ForeignKey(ScheduledReport,
-        related_name='relatedscheduledreport', on_delete=models.CASCADE)
-class ReportRecipient(models.Model):
-    """
-    Stores all the recipients of the given scheduled report
-    """
-    email = models.EmailField()
-    scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE)
\ No newline at end of file
diff --git a/application/tasks.py b/application/tasks.py
deleted file mode 100644
index ccc572f..0000000
--- a/application/tasks.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from celery.task.schedules import crontab
-from celery.decorators import periodic_task
-from .email_service import send_emails
-# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab
-@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*"))
-def trigger_emails():
-    send_emails()
\ No newline at end of file
diff --git a/application/views.py b/application/views.py
index 7c6de77..251d577 100644
--- a/application/views.py
+++ b/application/views.py
@@ -14,6 +14,7 @@ import collections
 from taggit_templatetags2.views import TagCanvasListView
 from django.contrib.auth.models import User
 from django.contrib import messages
+from post_office import mail
@@ -192,3 +193,11 @@ def blog_search_list_view(request):
 
 def tag_cloud(request):
     return render(request, 'tag_cloud.html', {})
+
+mail.send(
+    'esther.kleinhenz@web.de', # List of email addresses also accepted
+    'esther.kleinhenz@web.de',
+    subject='My email',
+    message='Hi there!',
+    html_message='Hi there!',
+)
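Note that the mail.send() call added to views.py sits at module level, after tag_cloud(), so it executes once when the module is imported (for example at server start or when the URL configuration loads), not once per request. A sketch of the same call wrapped in a view; the view name and response text are illustrative, the addresses are the ones from the hunk above:

    from django.http import HttpResponse
    from post_office import mail

    def send_test_mail(request):
        # Hypothetical view: the mail is handed to post_office only when
        # this view is actually requested.
        mail.send(
            'esther.kleinhenz@web.de',  # a list of addresses is also accepted
            'esther.kleinhenz@web.de',
            subject='My email',
            message='Hi there!',
            html_message='Hi there!',
        )
        return HttpResponse('mail queued')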
diff --git a/mysite/settings.py b/mysite/settings.py
index dc0e8b4..390b4af 100644
--- a/mysite/settings.py
+++ b/mysite/settings.py
@@ -13,7 +13,6 @@ https://docs.djangoproject.com/en/2.0/ref/settings/
 import os
 import re
 import socket
-import djcelery
 
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -47,8 +46,8 @@ INSTALLED_APPS = [
     'application',
     'taggit',
     'taggit_templatetags2',
-    'djcelery',
     'kombu.transport.django',
+    "post_office",
 ]
 
 MIDDLEWARE = [
@@ -254,11 +253,12 @@ if DEBUG:
     'INTERCEPT_REDIRECTS': False,
 }
 
-# Celery settings
-BROKER_URL = 'django://'
-CELERY_ACCEPT_CONTENT = ['json']
-CELERY_TASK_SERIALIZER = 'json'
-CELERY_RESULT_SERIALIZER = 'json'
-CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
-CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
-djcelery.setup_loader()
\ No newline at end of file
+
+EMAIL_BACKEND = 'post_office.EmailBackend'
+
+EMAIL_HOST = 'smtp.web.de'
+EMAIL_HOST_USER = "esther.kleinhenz@web.de"
+EMAIL_PORT = 25 # default smtp port
+EMAIL_HOST_PASSWORD = "2mSchneeinMikkeli"
+EMAIL_USE_TLS = True
+DEFAULT_FROM_EMAIL = 'your.generic.test.email@web.de'
\ No newline at end of file
diff --git a/mysite/wsgi.py b/mysite/wsgi.py
index ffbfe4f..a6fb5c0 100644
--- a/mysite/wsgi.py
+++ b/mysite/wsgi.py
@@ -8,7 +8,6 @@ https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
 """
 
 import os
-
 from django.core.wsgi import get_wsgi_application
 
 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
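The settings.py hunk above commits a live SMTP credential (EMAIL_HOST_PASSWORD) to version control, and the "-bash: 2****: command not found" line in send_mail.log below suggests it was pasted into a shell at some point as well. A safer sketch that keeps the password out of the repository; the environment variable name is illustrative:

    import os

    # settings.py: read the SMTP credential from the environment instead of
    # hard-coding it; the empty default keeps local runs importable.
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')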
diff --git a/send_mail.log b/send_mail.log
new file mode 100644
index 0000000..b7538d0
--- /dev/null
+++ b/send_mail.log
@@ -0,0 +1,27 @@
+[INFO]2018-10-29 09:59:41 PID 87687: Acquiring lock for sending queued emails at /var/folders/xd/tps872gx1lj4jy82m1m2tq1c0000gn/T/post_office.lock
+[INFO]2018-10-29 09:59:41 PID 87687: Started sending 31 emails with 1 processes.
+[INFO]2018-10-29 09:59:41 PID 87687: Process started, sending 31 emails
+[INFO]2018-10-29 09:59:45 PID 87687: Process finished, 31 attempted, 20 sent, 11 failed
+[INFO]2018-10-29 09:59:45 PID 87687: 31 emails attempted, 20 sent, 11 failed
+ --- Development sqlite database ---
+ --- Development stage ---
+-bash: 2****: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+-bash: application: command not found
+[INFO]2018-10-29 10:32:41 PID 89919: Acquiring lock for sending queued emails at /var/folders/xd/tps872gx1lj4jy82m1m2tq1c0000gn/T/post_office.lock
+[INFO]2018-10-29 10:32:41 PID 89919: Started sending 1 emails with 1 processes.
+[INFO]2018-10-29 10:32:41 PID 89919: Process started, sending 1 emails
+[INFO]2018-10-29 10:32:42 PID 89919: Process finished, 1 attempted, 1 sent, 0 failed
+[INFO]2018-10-29 10:32:42 PID 89919: 1 emails attempted, 1 sent, 0 failed
+ --- Development sqlite database ---
+ --- Development stage ---
+-bash: application: command not found
diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst
deleted file mode 100644
index dfc4aec..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,428 +0,0 @@
-=================================
- celery - Distributed Task Queue
-=================================
-
-.. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
-
-:Version: 3.1.26 (Cipater)
-:Web: http://celeryproject.org/
-:Download: http://pypi.python.org/pypi/celery/
-:Source: http://github.com/celery/celery/
-:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis,
-  python, webhooks, queue, distributed
-
---
-
-What is a Task Queue?
-=====================
-
-Task queues are used as a mechanism to distribute work across threads or
-machines.
-
-A task queue's input is a unit of work, called a task, dedicated worker
-processes then constantly monitor the queue for new work to perform.
-
-Celery communicates via messages, usually using a broker
-to mediate between clients and workers. To initiate a task a client puts a
-message on the queue, the broker then delivers the message to a worker.
-
-A Celery system can consist of multiple workers and brokers, giving way
-to high availability and horizontal scaling.
-
-Celery is a library written in Python, but the protocol can be implemented in
-any language. So far there's RCelery_ for the Ruby programming language, and a
-`PHP client`, but language interoperability can also be achieved
-by using webhooks.
-
-.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/
-.. _`PHP client`: https://github.com/gjedeer/celery-php
-.. _`using webhooks`:
-  http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html
-
-What do I need?
-===============
-
-Celery version 3.0 runs on,
-
-- Python (2.5, 2.6, 2.7, 3.2, 3.3)
-- PyPy (1.8, 1.9)
-- Jython (2.5, 2.7).
-
-This is the last version to support Python 2.5,
-and from Celery 3.1, Python 2.6 or later is required.
-The last version to support Python 2.4 was Celery series 2.2.
-
-*Celery* is usually used with a message broker to send and receive messages.
-The RabbitMQ, Redis transports are feature complete,
-but there's also experimental support for a myriad of other solutions, including
-using SQLite for local development.
-
-*Celery* can run on a single machine, on multiple machines, or even
-across datacenters.
-
-Get Started
-===========
-
-If this is the first time you're trying to use Celery, or you are
-new to Celery 3.0 coming from previous versions then you should read our
-getting started tutorials:
-
-- `First steps with Celery`_
-
-    Tutorial teaching you the bare minimum needed to get started with Celery.
-
-- `Next steps`_
-
-    A more complete overview, showing more features.
-
-.. _`First steps with Celery`:
-  http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html
-
-.. _`Next steps`:
-  http://docs.celeryproject.org/en/latest/getting-started/next-steps.html
-
-Celery is...
-============ - -- **Simple** - - Celery is easy to use and maintain, and does *not need configuration files*. - - It has an active, friendly community you can talk to for support, - including a `mailing-list`_ and and an IRC channel. - - Here's one of the simplest applications you can make:: - - from celery import Celery - - app = Celery('hello', broker='amqp://guest@localhost//') - - @app.task - def hello(): - return 'hello world' - -- **Highly Available** - - Workers and clients will automatically retry in the event - of connection loss or failure, and some brokers support - HA in way of *Master/Master* or *Master/Slave* replication. - -- **Fast** - - A single Celery process can process millions of tasks a minute, - with sub-millisecond round-trip latency (using RabbitMQ, - py-librabbitmq, and optimized settings). - -- **Flexible** - - Almost every part of *Celery* can be extended or used on its own, - Custom pool implementations, serializers, compression schemes, logging, - schedulers, consumers, producers, autoscalers, broker transports and much more. - -It supports... -============== - - - **Message Transports** - - - RabbitMQ_, Redis_, - - MongoDB_ (experimental), Amazon SQS (experimental), - - CouchDB_ (experimental), SQLAlchemy_ (experimental), - - Django ORM (experimental), `IronMQ`_ - - and more... - - - **Concurrency** - - - Prefork, Eventlet_, gevent_, threads/single threaded - - - **Result Stores** - - - AMQP, Redis - - memcached, MongoDB - - SQLAlchemy, Django ORM - - Apache Cassandra, IronCache - - - **Serialization** - - - *pickle*, *json*, *yaml*, *msgpack*. - - *zlib*, *bzip2* compression. - - Cryptographic message signing. - -.. _`Eventlet`: http://eventlet.net/ -.. _`gevent`: http://gevent.org/ - -.. _RabbitMQ: http://rabbitmq.com -.. _Redis: http://redis.io -.. _MongoDB: http://mongodb.org -.. _Beanstalk: http://kr.github.com/beanstalkd -.. _CouchDB: http://couchdb.apache.org -.. _SQLAlchemy: http://sqlalchemy.org -.. _`IronMQ`: http://iron.io - -Framework Integration -===================== - -Celery is easy to integrate with web frameworks, some of which even have -integration packages: - - +--------------------+------------------------+ - | `Django`_ | not needed | - +--------------------+------------------------+ - | `Pyramid`_ | `pyramid_celery`_ | - +--------------------+------------------------+ - | `Pylons`_ | `celery-pylons`_ | - +--------------------+------------------------+ - | `Flask`_ | not needed | - +--------------------+------------------------+ - | `web2py`_ | `web2py-celery`_ | - +--------------------+------------------------+ - | `Tornado`_ | `tornado-celery`_ | - +--------------------+------------------------+ - -The integration packages are not strictly necessary, but they can make -development easier, and sometimes they add important hooks like closing -database connections at ``fork``. - -.. _`Django`: http://djangoproject.com/ -.. _`Pylons`: http://pylonsproject.org/ -.. _`Flask`: http://flask.pocoo.org/ -.. _`web2py`: http://web2py.com/ -.. _`Bottle`: http://bottlepy.org/ -.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html -.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/ -.. _`django-celery`: http://pypi.python.org/pypi/django-celery -.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons -.. _`web2py-celery`: http://code.google.com/p/web2py-celery/ -.. _`Tornado`: http://www.tornadoweb.org/ -.. _`tornado-celery`: http://github.com/mher/tornado-celery/ - -.. 
_celery-documentation: - -Documentation -============= - -The `latest documentation`_ with user guides, tutorials and API reference -is hosted at Read The Docs. - -.. _`latest documentation`: http://docs.celeryproject.org/en/latest/ - -.. _celery-installation: - -Installation -============ - -You can install Celery either via the Python Package Index (PyPI) -or from source. - -To install using `pip`,:: - - $ pip install -U Celery - -To install using `easy_install`,:: - - $ easy_install -U Celery - -.. _bundles: - -Bundles -------- - -Celery also defines a group of bundles that can be used -to install Celery and the dependencies for a given feature. - -You can specify these in your requirements or on the ``pip`` comand-line -by using brackets. Multiple bundles can be specified by separating them by -commas. -:: - - $ pip install "celery[librabbitmq]" - - $ pip install "celery[librabbitmq,redis,auth,msgpack]" - -The following bundles are available: - -Serializers -~~~~~~~~~~~ - -:celery[auth]: - for using the auth serializer. - -:celery[msgpack]: - for using the msgpack serializer. - -:celery[yaml]: - for using the yaml serializer. - -Concurrency -~~~~~~~~~~~ - -:celery[eventlet]: - for using the eventlet pool. - -:celery[gevent]: - for using the gevent pool. - -:celery[threads]: - for using the thread pool. - -Transports and Backends -~~~~~~~~~~~~~~~~~~~~~~~ - -:celery[librabbitmq]: - for using the librabbitmq C library. - -:celery[redis]: - for using Redis as a message transport or as a result backend. - -:celery[mongodb]: - for using MongoDB as a message transport (*experimental*), - or as a result backend (*supported*). - -:celery[sqs]: - for using Amazon SQS as a message transport (*experimental*). - -:celery[memcache]: - for using memcached as a result backend. - -:celery[cassandra]: - for using Apache Cassandra as a result backend. - -:celery[couchdb]: - for using CouchDB as a message transport (*experimental*). - -:celery[couchbase]: - for using CouchBase as a result backend. - -:celery[beanstalk]: - for using Beanstalk as a message transport (*experimental*). - -:celery[zookeeper]: - for using Zookeeper as a message transport. - -:celery[zeromq]: - for using ZeroMQ as a message transport (*experimental*). - -:celery[sqlalchemy]: - for using SQLAlchemy as a message transport (*experimental*), - or as a result backend (*supported*). - -:celery[pyro]: - for using the Pyro4 message transport (*experimental*). - -:celery[slmq]: - for using the SoftLayer Message Queue transport (*experimental*). - -.. _celery-installing-from-source: - -Downloading and installing from source --------------------------------------- - -Download the latest version of Celery from -http://pypi.python.org/pypi/celery/ - -You can install it by doing the following,:: - - $ tar xvfz celery-0.0.0.tar.gz - $ cd celery-0.0.0 - $ python setup.py build - # python setup.py install - -The last command must be executed as a privileged user if -you are not currently using a virtualenv. - -.. _celery-installing-from-git: - -Using the development version ------------------------------ - -With pip -~~~~~~~~ - -The Celery development version also requires the development -versions of ``kombu``, ``amqp`` and ``billiard``. 
- -You can install the latest snapshot of these using the following -pip commands:: - - $ pip install https://github.com/celery/celery/zipball/master#egg=celery - $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard - $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp - $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu - -With git -~~~~~~~~ - -Please the Contributing section. - -.. _getting-help: - -Getting Help -============ - -.. _mailing-list: - -Mailing list ------------- - -For discussions about the usage, development, and future of celery, -please join the `celery-users`_ mailing list. - -.. _`celery-users`: http://groups.google.com/group/celery-users/ - -.. _irc-channel: - -IRC ---- - -Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ -network. - -.. _`Freenode`: http://freenode.net - -.. _bug-tracker: - -Bug tracker -=========== - -If you have any suggestions, bug reports or annoyances please report them -to our issue tracker at http://github.com/celery/celery/issues/ - -.. _wiki: - -Wiki -==== - -http://wiki.github.com/celery/celery/ - -.. _contributing-short: - -Contributing -============ - -Development of `celery` happens at Github: http://github.com/celery/celery - -You are highly encouraged to participate in the development -of `celery`. If you don't like Github (for some reason) you're welcome -to send regular patches. - -Be sure to also read the `Contributing to Celery`_ section in the -documentation. - -.. _`Contributing to Celery`: - http://docs.celeryproject.org/en/master/contributing.html - -.. _license: - -License -======= - -This software is licensed under the `New BSD License`. See the ``LICENSE`` -file in the top distribution directory for the full license text. - -.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround - - -.. 
image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png - :alt: Bitdeli badge - :target: https://bitdeli.com/free - - diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA deleted file mode 100644 index 90226ec..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA +++ /dev/null @@ -1,500 +0,0 @@ -Metadata-Version: 2.0 -Name: celery -Version: 3.1.26.post2 -Summary: Distributed Task Queue -Home-page: http://celeryproject.org -Author: Ask Solem -Author-email: ask@celeryproject.org -License: BSD -Description-Content-Type: UNKNOWN -Platform: any -Classifier: Development Status :: 5 - Production/Stable -Classifier: License :: OSI Approved :: BSD License -Classifier: Topic :: System :: Distributed Computing -Classifier: Topic :: Software Development :: Object Brokering -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Programming Language :: Python :: Implementation :: Jython -Classifier: Operating System :: OS Independent -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Requires-Dist: pytz (>dev) -Requires-Dist: billiard (<3.4,>=3.3.0.23) -Requires-Dist: kombu (<3.1,>=3.0.37) -Provides-Extra: auth -Requires-Dist: pyOpenSSL; extra == 'auth' -Provides-Extra: beanstalk -Requires-Dist: beanstalkc; extra == 'beanstalk' -Provides-Extra: cassandra -Requires-Dist: pycassa; extra == 'cassandra' -Provides-Extra: couchbase -Requires-Dist: couchbase; extra == 'couchbase' -Provides-Extra: couchdb -Requires-Dist: couchdb; extra == 'couchdb' -Provides-Extra: eventlet -Requires-Dist: eventlet; extra == 'eventlet' -Provides-Extra: gevent -Requires-Dist: gevent; extra == 'gevent' -Provides-Extra: librabbitmq -Requires-Dist: librabbitmq (>=1.6.1); extra == 'librabbitmq' -Provides-Extra: memcache -Requires-Dist: pylibmc; extra == 'memcache' -Provides-Extra: mongodb -Requires-Dist: pymongo (>=2.6.2); extra == 'mongodb' -Provides-Extra: msgpack -Requires-Dist: msgpack-python (>=0.3.0); extra == 'msgpack' -Provides-Extra: pyro -Requires-Dist: pyro4; extra == 'pyro' -Provides-Extra: redis -Requires-Dist: redis (>=2.8.0); extra == 'redis' -Provides-Extra: slmq -Requires-Dist: softlayer-messaging (>=1.0.3); extra == 'slmq' -Provides-Extra: sqlalchemy -Requires-Dist: sqlalchemy; extra == 'sqlalchemy' -Provides-Extra: sqs -Requires-Dist: boto (>=2.13.3); extra == 'sqs' -Provides-Extra: threads -Requires-Dist: threadpool; extra == 'threads' -Provides-Extra: yaml -Requires-Dist: PyYAML (>=3.10); extra == 'yaml' -Provides-Extra: zeromq -Requires-Dist: pyzmq (>=13.1.0); extra == 'zeromq' -Provides-Extra: zookeeper -Requires-Dist: kazoo (>=1.3.1); extra == 'zookeeper' - -================================= - celery - Distributed Task Queue -================================= - -.. 
image:: http://cloud.github.com/downloads/celery/celery/celery_128.png - -:Version: 3.1.26 (Cipater) -:Web: http://celeryproject.org/ -:Download: http://pypi.python.org/pypi/celery/ -:Source: http://github.com/celery/celery/ -:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis, - python, webhooks, queue, distributed - --- - -What is a Task Queue? -===================== - -Task queues are used as a mechanism to distribute work across threads or -machines. - -A task queue's input is a unit of work, called a task, dedicated worker -processes then constantly monitor the queue for new work to perform. - -Celery communicates via messages, usually using a broker -to mediate between clients and workers. To initiate a task a client puts a -message on the queue, the broker then delivers the message to a worker. - -A Celery system can consist of multiple workers and brokers, giving way -to high availability and horizontal scaling. - -Celery is a library written in Python, but the protocol can be implemented in -any language. So far there's RCelery_ for the Ruby programming language, and a -`PHP client`, but language interoperability can also be achieved -by using webhooks. - -.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/ -.. _`PHP client`: https://github.com/gjedeer/celery-php -.. _`using webhooks`: - http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html - -What do I need? -=============== - -Celery version 3.0 runs on, - -- Python (2.5, 2.6, 2.7, 3.2, 3.3) -- PyPy (1.8, 1.9) -- Jython (2.5, 2.7). - -This is the last version to support Python 2.5, -and from Celery 3.1, Python 2.6 or later is required. -The last version to support Python 2.4 was Celery series 2.2. - -*Celery* is usually used with a message broker to send and receive messages. -The RabbitMQ, Redis transports are feature complete, -but there's also experimental support for a myriad of other solutions, including -using SQLite for local development. - -*Celery* can run on a single machine, on multiple machines, or even -across datacenters. - -Get Started -=========== - -If this is the first time you're trying to use Celery, or you are -new to Celery 3.0 coming from previous versions then you should read our -getting started tutorials: - -- `First steps with Celery`_ - - Tutorial teaching you the bare minimum needed to get started with Celery. - -- `Next steps`_ - - A more complete overview, showing more features. - -.. _`First steps with Celery`: - http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html - -.. _`Next steps`: - http://docs.celeryproject.org/en/latest/getting-started/next-steps.html - -Celery is... -============ - -- **Simple** - - Celery is easy to use and maintain, and does *not need configuration files*. - - It has an active, friendly community you can talk to for support, - including a `mailing-list`_ and and an IRC channel. - - Here's one of the simplest applications you can make:: - - from celery import Celery - - app = Celery('hello', broker='amqp://guest@localhost//') - - @app.task - def hello(): - return 'hello world' - -- **Highly Available** - - Workers and clients will automatically retry in the event - of connection loss or failure, and some brokers support - HA in way of *Master/Master* or *Master/Slave* replication. - -- **Fast** - - A single Celery process can process millions of tasks a minute, - with sub-millisecond round-trip latency (using RabbitMQ, - py-librabbitmq, and optimized settings). 
- -- **Flexible** - - Almost every part of *Celery* can be extended or used on its own, - Custom pool implementations, serializers, compression schemes, logging, - schedulers, consumers, producers, autoscalers, broker transports and much more. - -It supports... -============== - - - **Message Transports** - - - RabbitMQ_, Redis_, - - MongoDB_ (experimental), Amazon SQS (experimental), - - CouchDB_ (experimental), SQLAlchemy_ (experimental), - - Django ORM (experimental), `IronMQ`_ - - and more... - - - **Concurrency** - - - Prefork, Eventlet_, gevent_, threads/single threaded - - - **Result Stores** - - - AMQP, Redis - - memcached, MongoDB - - SQLAlchemy, Django ORM - - Apache Cassandra, IronCache - - - **Serialization** - - - *pickle*, *json*, *yaml*, *msgpack*. - - *zlib*, *bzip2* compression. - - Cryptographic message signing. - -.. _`Eventlet`: http://eventlet.net/ -.. _`gevent`: http://gevent.org/ - -.. _RabbitMQ: http://rabbitmq.com -.. _Redis: http://redis.io -.. _MongoDB: http://mongodb.org -.. _Beanstalk: http://kr.github.com/beanstalkd -.. _CouchDB: http://couchdb.apache.org -.. _SQLAlchemy: http://sqlalchemy.org -.. _`IronMQ`: http://iron.io - -Framework Integration -===================== - -Celery is easy to integrate with web frameworks, some of which even have -integration packages: - - +--------------------+------------------------+ - | `Django`_ | not needed | - +--------------------+------------------------+ - | `Pyramid`_ | `pyramid_celery`_ | - +--------------------+------------------------+ - | `Pylons`_ | `celery-pylons`_ | - +--------------------+------------------------+ - | `Flask`_ | not needed | - +--------------------+------------------------+ - | `web2py`_ | `web2py-celery`_ | - +--------------------+------------------------+ - | `Tornado`_ | `tornado-celery`_ | - +--------------------+------------------------+ - -The integration packages are not strictly necessary, but they can make -development easier, and sometimes they add important hooks like closing -database connections at ``fork``. - -.. _`Django`: http://djangoproject.com/ -.. _`Pylons`: http://pylonsproject.org/ -.. _`Flask`: http://flask.pocoo.org/ -.. _`web2py`: http://web2py.com/ -.. _`Bottle`: http://bottlepy.org/ -.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html -.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/ -.. _`django-celery`: http://pypi.python.org/pypi/django-celery -.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons -.. _`web2py-celery`: http://code.google.com/p/web2py-celery/ -.. _`Tornado`: http://www.tornadoweb.org/ -.. _`tornado-celery`: http://github.com/mher/tornado-celery/ - -.. _celery-documentation: - -Documentation -============= - -The `latest documentation`_ with user guides, tutorials and API reference -is hosted at Read The Docs. - -.. _`latest documentation`: http://docs.celeryproject.org/en/latest/ - -.. _celery-installation: - -Installation -============ - -You can install Celery either via the Python Package Index (PyPI) -or from source. - -To install using `pip`,:: - - $ pip install -U Celery - -To install using `easy_install`,:: - - $ easy_install -U Celery - -.. _bundles: - -Bundles -------- - -Celery also defines a group of bundles that can be used -to install Celery and the dependencies for a given feature. - -You can specify these in your requirements or on the ``pip`` comand-line -by using brackets. Multiple bundles can be specified by separating them by -commas. 
-:: - - $ pip install "celery[librabbitmq]" - - $ pip install "celery[librabbitmq,redis,auth,msgpack]" - -The following bundles are available: - -Serializers -~~~~~~~~~~~ - -:celery[auth]: - for using the auth serializer. - -:celery[msgpack]: - for using the msgpack serializer. - -:celery[yaml]: - for using the yaml serializer. - -Concurrency -~~~~~~~~~~~ - -:celery[eventlet]: - for using the eventlet pool. - -:celery[gevent]: - for using the gevent pool. - -:celery[threads]: - for using the thread pool. - -Transports and Backends -~~~~~~~~~~~~~~~~~~~~~~~ - -:celery[librabbitmq]: - for using the librabbitmq C library. - -:celery[redis]: - for using Redis as a message transport or as a result backend. - -:celery[mongodb]: - for using MongoDB as a message transport (*experimental*), - or as a result backend (*supported*). - -:celery[sqs]: - for using Amazon SQS as a message transport (*experimental*). - -:celery[memcache]: - for using memcached as a result backend. - -:celery[cassandra]: - for using Apache Cassandra as a result backend. - -:celery[couchdb]: - for using CouchDB as a message transport (*experimental*). - -:celery[couchbase]: - for using CouchBase as a result backend. - -:celery[beanstalk]: - for using Beanstalk as a message transport (*experimental*). - -:celery[zookeeper]: - for using Zookeeper as a message transport. - -:celery[zeromq]: - for using ZeroMQ as a message transport (*experimental*). - -:celery[sqlalchemy]: - for using SQLAlchemy as a message transport (*experimental*), - or as a result backend (*supported*). - -:celery[pyro]: - for using the Pyro4 message transport (*experimental*). - -:celery[slmq]: - for using the SoftLayer Message Queue transport (*experimental*). - -.. _celery-installing-from-source: - -Downloading and installing from source --------------------------------------- - -Download the latest version of Celery from -http://pypi.python.org/pypi/celery/ - -You can install it by doing the following,:: - - $ tar xvfz celery-0.0.0.tar.gz - $ cd celery-0.0.0 - $ python setup.py build - # python setup.py install - -The last command must be executed as a privileged user if -you are not currently using a virtualenv. - -.. _celery-installing-from-git: - -Using the development version ------------------------------ - -With pip -~~~~~~~~ - -The Celery development version also requires the development -versions of ``kombu``, ``amqp`` and ``billiard``. - -You can install the latest snapshot of these using the following -pip commands:: - - $ pip install https://github.com/celery/celery/zipball/master#egg=celery - $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard - $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp - $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu - -With git -~~~~~~~~ - -Please the Contributing section. - -.. _getting-help: - -Getting Help -============ - -.. _mailing-list: - -Mailing list ------------- - -For discussions about the usage, development, and future of celery, -please join the `celery-users`_ mailing list. - -.. _`celery-users`: http://groups.google.com/group/celery-users/ - -.. _irc-channel: - -IRC ---- - -Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ -network. - -.. _`Freenode`: http://freenode.net - -.. _bug-tracker: - -Bug tracker -=========== - -If you have any suggestions, bug reports or annoyances please report them -to our issue tracker at http://github.com/celery/celery/issues/ - -.. 
_wiki: - -Wiki -==== - -http://wiki.github.com/celery/celery/ - -.. _contributing-short: - -Contributing -============ - -Development of `celery` happens at Github: http://github.com/celery/celery - -You are highly encouraged to participate in the development -of `celery`. If you don't like Github (for some reason) you're welcome -to send regular patches. - -Be sure to also read the `Contributing to Celery`_ section in the -documentation. - -.. _`Contributing to Celery`: - http://docs.celeryproject.org/en/master/contributing.html - -.. _license: - -License -======= - -This software is licensed under the `New BSD License`. See the ``LICENSE`` -file in the top distribution directory for the full license text. - -.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround - - -.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png - :alt: Bitdeli badge - :target: https://bitdeli.com/free - - diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD deleted file mode 100644 index 3630f9f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD +++ /dev/null @@ -1,496 +0,0 @@ -celery/__init__.py,sha256=3CpQmXwUsO3qBXRvUbCUgeb95Hs76iUyti10oevsJWw,5727 -celery/__main__.py,sha256=Zdv8wB4CbSvtgrGUVIZyFkQcHxFS7z3RRijGi4uQMN4,983 -celery/_state.py,sha256=TU-oQvKpbZtrYpU6iF7OJsekP897J_qRR0Y62Y4LSy8,3921 -celery/beat.py,sha256=kcwCMfxcS7Jvd2p7dMmK0J4NO79-OlVQbJJokJWDcHI,19009 -celery/bootsteps.py,sha256=ASlSzf2DFfYfOvtud-p-m_zo7K3f5IKspzTAzjlfNc8,12382 -celery/canvas.py,sha256=b5WZZqdHuI2bhVbroMY-K2VU_XXBY0m5hkxfy3-KNFY,22501 -celery/datastructures.py,sha256=i0evKEjw8-OPZyT77Fjr7q-nrVKPKyk3IbQ94jJzoOk,18647 -celery/exceptions.py,sha256=8SJV-PofoiyOwbSzb8CueVeKlBDTJDHkkgPQE1Suu-w,4526 -celery/five.py,sha256=a-4lbbb-qHnEm0kh7BjENwWIuft-R4WzIC2htemnIsY,11695 -celery/local.py,sha256=vXD1d-QliYsrKAJgsIj0ZNG1KEXHcoB2Ty1JEOWH_Yg,10818 -celery/platforms.py,sha256=0W1WSk8b3AQ6oNhtM5JEgN27DHoXZzzSEJ3nvjwuBs0,24774 -celery/result.py,sha256=kzlMWbWxY_rfI90RsmrV2LB8c7X2iJDaYcOh5esAhy8,28701 -celery/schedules.py,sha256=XrWguXKa8-umIbG805tvzPmUbM6B2d41SKqr86CYUy8,21787 -celery/signals.py,sha256=zuwvWGAyIQLL4F0p83wRSbjBVdnQDnEsiCC3H3_3BAc,2929 -celery/states.py,sha256=qZ880RMvRcspPb87u13wlfiP0ilQh_-Ap_I8-l0PM6w,3430 -celery/app/__init__.py,sha256=Pg6NKoOd4tbajaxrIpMcYqV_gbguCnWGbUavNUJtRVg,4380 -celery/app/amqp.py,sha256=MCAmCd20hXGAO0ilV78BUUPDNxRpE5gLD7vKIodQswk,19101 -celery/app/annotations.py,sha256=mwfXit7ZByMykH0Su7KutgEXC2DxN0MIVKwioXtiqPU,1514 -celery/app/base.py,sha256=knLzZffbOmaC3LAF-zXDzNeVzuOnzr28o_y7EZ7_mFs,24196 -celery/app/builtins.py,sha256=Dmvkm-aeMosvGdFbNGXua5otk81Qjwh5vEIGjlZjPDg,14180 -celery/app/control.py,sha256=7CrvxyZE-fIW0gSDvfUSbaZN5nGd7AWFSUlKKC5AXoI,11023 -celery/app/defaults.py,sha256=4wUvjXss3CoJvdF5B271hot1rquOn26nXHvZ2dbQHaw,11238 -celery/app/log.py,sha256=LzKSBh61d_ZK_yCW5zfR4dOmzSOs6a4cjyAFg75cZu0,9065 -celery/app/registry.py,sha256=pJLgSmSyNtn-q-GESpQQSr2TFzh8yQvPuDHD7XzwxU8,1748 -celery/app/routes.py,sha256=YzooevUbmGNrrAHGR0AwFxtuKWL2xP6g14l99dssaFI,2967 -celery/app/task.py,sha256=TclL59Gs79Sn7h5HVdHOdQtxDU3AfgQJKB7PZz5RzZY,35574 -celery/app/trace.py,sha256=lmdPyBwFKSxkfTjVPOKaTD6Rnnhs1FIHdOhcbcVmhaQ,16717 -celery/app/utils.py,sha256=oR28DoRzVVMaSFOMZ47JFGvFAP3aTtPEEH7B1LsmFAs,8367 -celery/apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-celery/apps/beat.py,sha256=Yu31IM0cKqM5FWt1motBjRBAYvpIsODrPRDAp1J_IYI,5189 -celery/apps/worker.py,sha256=c8mxAhCSpG5K9snPXHwpDnCCOggVMlKnH4sS2Dq8SO8,12555 -celery/backends/__init__.py,sha256=2DzVIyTm-lq5W6ElqMAK9AiJxCynp6E-bF7gPoFgfAk,2206 -celery/backends/amqp.py,sha256=p1coJ96bJR-V__RztU58zzlilclnFqK1Mkp1NYnf44E,11622 -celery/backends/base.py,sha256=pBDi5K-SO7bWRB-gXNcDky5ADO0xwJazfOkRFvsMuFc,22572 -celery/backends/cache.py,sha256=DErN0OboNLQRmL-_E6wEbBmxylZPCUJOfyydDryW5wE,4635 -celery/backends/cassandra.py,sha256=UL4qeFYa5qUC0E7oJRmci2JhDp5z7d_OPNsJnkw-B6M,7219 -celery/backends/couchbase.py,sha256=F_qczQDDBmOmViFP8M0RZ0NXPlCWxFovqqGoB4WYghk,3382 -celery/backends/mongodb.py,sha256=Ke9zj5vhmTnVAHplOhiec8B5D62_ty30PDZEF_8LFck,8688 -celery/backends/redis.py,sha256=gBz8DhREI1rKMFwQ9behNVQP8qrWJoBwU3HAi9C4tXU,10566 -celery/backends/rpc.py,sha256=Qllbxw0T-rt6mgRxmNnZUlFgvpSgOiQOdBAU6mjlcGY,1790 -celery/backends/database/__init__.py,sha256=gCwDd2xkJ95jxwGWcIz9IIst1aryaGJ4NjufR7xPmmo,6568 -celery/backends/database/models.py,sha256=k_WXPzVk9BCGm63ne4nhQO5cDpA-WJ4afaoCtdk7dLE,2261 -celery/backends/database/session.py,sha256=tGJAnVNXOj-LW_z8Blh9u8aZ8j01M0aOLHomOrkNmvE,1840 -celery/bin/__init__.py,sha256=YULxAVdpSTcKce56Bt_l9rXSho8pqpcp082NwnkTRHs,87 -celery/bin/amqp.py,sha256=WoQCn_sg9Vbj7Bryd-sUNxNePtsl91c5_Oi3z1W0_Jk,11651 -celery/bin/base.py,sha256=saxceFnADwkNVLySAqgSaBu1W9LKfD2rfP6co_wtcBQ,21336 -celery/bin/beat.py,sha256=abMzN3d3Zu8VBKAeSiZuG1_P1loqTsu7TZWdkXt1ugM,2638 -celery/bin/celery.py,sha256=4BfRWimQltbDzUqIKmq_OSm2X4DYhwUgc0ypyDabLig,29485 -celery/bin/celeryd_detach.py,sha256=oWGoWfOgaSTi4hb-EpAKHWUPA1gXG0sjlMp6pz4PPuA,6026 -celery/bin/events.py,sha256=cSFvfzN5OHNdva0Yuzz5HNM1jhZZXtcaqdL0exVI578,4052 -celery/bin/graph.py,sha256=JycXaXGTtIyxCy96ph1Zk8FQ_3wk-9fhCDueH4hWneo,6420 -celery/bin/multi.py,sha256=owyqxdQROMFAJUMt-L5BFc8DQveSKftDHcZDlRjs_Sc,21265 -celery/bin/worker.py,sha256=P78klQzKKb872rCEXWj5MGUITA7ZN5pxiy559zjd5aU,9014 -celery/concurrency/__init__.py,sha256=t_AgXnznrRCoiAV_7ClDUzhwwu39rKIlpjr0vF7hbDg,820 -celery/concurrency/asynpool.py,sha256=MoEzDfw-po8p_kEUwjRRAATpuUoJ8hUM-BhbFHVKo0w,47804 -celery/concurrency/base.py,sha256=G_AOmxS6wsAMQ8BPcZWK2AoT4y30Sm76TtkZdGgdlrg,4203 -celery/concurrency/eventlet.py,sha256=c2R3K9Trpow43SkvnfFzkR65gbihJhIBTCaQod1LD7E,4287 -celery/concurrency/gevent.py,sha256=KjdibnAD5YfVDh1WHRKIoYKLCdGHp31WOOxXPy9UyMw,3509 -celery/concurrency/prefork.py,sha256=udTgaF-QycG4ZiDpZo_QhtjCuxcM1CUKUk4dhlXQMOU,5755 -celery/concurrency/solo.py,sha256=zi0qLzLJjO8ApdUokc-5UimsJyQFhD-_acSn8Cwgclc,696 -celery/concurrency/threads.py,sha256=N41qfLMLyWqv1cClfAm3ICATWJmC8DqfF3jReRkjgt8,1767 -celery/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/contrib/abortable.py,sha256=bnOC_4lmXSrtGkvSFndEdWiJNyscynLrcpGKnr1NhcM,5094 -celery/contrib/batches.py,sha256=1GaaJw77TSjslI3B_iYleRrM-EPBDCNstmcahC8ER7U,7152 -celery/contrib/methods.py,sha256=PVmZu0PQ1rrAKzb4GzuyEPCYPUgyuFasjMpUFhEOJzU,2613 -celery/contrib/migrate.py,sha256=rMbY-7sn7sgmwkpqDleFCBUg1qR1weSi3DDmIYbss-c,11911 -celery/contrib/rdb.py,sha256=sH69j4_YBBwE9TPlqykaAlf11AN7a7r5_J3Yf5oqAeQ,4986 -celery/contrib/sphinx.py,sha256=SZd8CT67_MkcFrPUuiqDbjRF2B1QKEMO0H_ZnQcOTAQ,2019 -celery/events/__init__.py,sha256=HVSYE0r5JKMwtBbmeas_nM0LZM5wCBSPhR5lQ7GpYiI,14166 -celery/events/cursesmon.py,sha256=4sUQ8eulZwoaIRxSOwxVON86QknY2RO9Sf8dvtzyTZs,18311 -celery/events/dumper.py,sha256=LXvJDwjkO1mnny35r5xChiQinu3pDk5mJRK41PgPMnA,3285 
-celery/events/snapshot.py,sha256=gkM6AkWp5Jv49gurjDDeI-NFa5FUWzwZ0A2ALiuV0EI,3268 -celery/events/state.py,sha256=5Qffr6Abj7ASxtV4-p_60PcHSVVneToW0e2Scgx6z5Q,23275 -celery/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/fixups/django.py,sha256=fyPyX9OUnWhAuk-TWm--0XaeY3dNsdBOrpIvcTvvZWE,9093 -celery/loaders/__init__.py,sha256=qpF2RdoBeubV_teLZ2yKoRq8sj4aqLjRBoSCgyte47Y,1015 -celery/loaders/app.py,sha256=fFFD4SVrECpzM60DZVcnLjDtYhr1tf4ABPtkD3H1MbE,269 -celery/loaders/base.py,sha256=mclr01KtYoD0oXtYSg6erKYw8Bb27u0LJrBrD4hCFQk,9303 -celery/loaders/default.py,sha256=KH0Y2iA486qelyzoewv47SynpYJIofW2vbdFTcmGYbE,1705 -celery/security/__init__.py,sha256=KbfxRiy_FHJbYqVsedV7MlAPsThJdwxhjV5F3IjgQAU,1923 -celery/security/certificate.py,sha256=Mc925ch6wLe2sYXmBsRm7rmr2FXclq1wopEdVpRc6kc,2746 -celery/security/key.py,sha256=rBdjSYIgTHhqrSN2YUmqOU3xn56vamLigpZTtvSQqDI,679 -celery/security/serialization.py,sha256=D9iZNvuxA-SQXolHWOyGRnNPwCNnEqFbjayhf9vQ3E8,4011 -celery/security/utils.py,sha256=mI12UmxFkxzNCdWsrv71N6r9qNHGZwy9weSl_HaCNP0,759 -celery/task/__init__.py,sha256=d0iH36VG3zOfCCv6KjvXrcO-eePFOryCLexFeUI8PLc,1743 -celery/task/base.py,sha256=zkKUF640T8cf2ltk5b_6MOWYwNOYbjqshE9ofceihn0,5583 -celery/task/http.py,sha256=qEu9tPSqSit-5L6MuOJY1EFutFim8JVGL9bked9uSFw,6849 -celery/task/sets.py,sha256=GStbowg-IQW2Xu96qV6leMiYth3gQ9mQAcKy-3hNHkI,2769 -celery/task/trace.py,sha256=unQgQJ3BjhhvPQnkBqJ-WsHj74_nvYmYSn_E1pyGcm4,323 -celery/tests/__init__.py,sha256=G98w19Jt-55CrtCUpBzoE7ooUoDbBH_4OJmune3k0D4,2618 -celery/tests/case.py,sha256=kWtIhEH582gUSNcvSAJeH37RvUuyEEy8svDzuT6ewMg,25267 -celery/tests/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/app/test_amqp.py,sha256=yn8vLfD3lDdPjaTE9NGsNR3aQgcKJX3KNC0Uo8h3p3g,7778 -celery/tests/app/test_annotations.py,sha256=guYRiOgF-jqzqTKdjAbC469_nKjxtdq-PxVJNbMMvow,1532 -celery/tests/app/test_app.py,sha256=vlytaWTW7VcOhqIM4RPkcTRjpp7XtTwPjpEwo7AC3ns,23272 -celery/tests/app/test_beat.py,sha256=zoHiwseH7Vw5YOcerhDMpVKog9QgIPXa7POdMTeb6JM,17387 -celery/tests/app/test_builtins.py,sha256=OxqNpLV9Z6KFrtxokJ8VHVuec-dA40nLCtMVH22pwBw,6575 -celery/tests/app/test_celery.py,sha256=Q4XTxX96kX-IUZMw95Q96AmVeeE1L00_2bfTOORodJg,535 -celery/tests/app/test_control.py,sha256=IcbpqPMVaOsL-9vaskBq8Hx2V7_09CYC5Y8kuobX538,7022 -celery/tests/app/test_defaults.py,sha256=gDxD5htqT_cFeUruz8neLLj-V1ffys5nb7u7138VlKQ,1815 -celery/tests/app/test_exceptions.py,sha256=co-o7xbNKNBAIsIW5E4x5dQntv-HK-72e1PnqsOR3Ag,849 -celery/tests/app/test_loaders.py,sha256=h5c_QJcsmoD56Uwhsi4cACK3w4cP1dnd3d-8-rOUtC0,9487 -celery/tests/app/test_log.py,sha256=nW_uMGURkHnEs-vEGg-ciTYQmXPoQXcfAvfSe7jPZpY,12745 -celery/tests/app/test_registry.py,sha256=Kw6BIkMuJMt-XRMLnVr1Dce3MLZeO4J5-abCEwGf5NM,2512 -celery/tests/app/test_routes.py,sha256=ZuoWarzltzzRx58cB8dam8i1qkZKf00A2IpkBxfCWkQ,5354 -celery/tests/app/test_schedules.py,sha256=KxjiGMXjuzGr0IZsb-Bph2AhUPeNAKNhBBajBSZ7XNo,28559 -celery/tests/app/test_utils.py,sha256=10EAWo_5AyYYddROKuSiylZixzChcqdUg06Wev2PIqw,1309 -celery/tests/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/backends/test_amqp.py,sha256=j6HCUJv4JAn-UGjx9lwW-ZbrcGFzkXXPxtW--CeaGDY,14161 -celery/tests/backends/test_backends.py,sha256=DYm8tSsuUHSz1Gxnm1yBvNa1dHBbXn-WVrARWOoN6Vw,1535 -celery/tests/backends/test_base.py,sha256=vt2vdWekD0bEPT-L-ovdxit5RWbBn3RDdRMmjPBOglc,16071 -celery/tests/backends/test_cache.py,sha256=32keeBhHGLqlDDHzqviHwbAewuRpQPrPTnhv_6aW4fM,10280 
-celery/tests/backends/test_cassandra.py,sha256=HOtGEfL82sUXBNOIr0D3z3fINmeeZH-mBDnOD83B93s,6412 -celery/tests/backends/test_couchbase.py,sha256=9Wu1cQ3UKUCV-yrrufeqpAQVvqyeMV1VjGFHXeQxAq0,4782 -celery/tests/backends/test_database.py,sha256=NlN4WTret69GSJrSJBGEU9IDFg1UdFEwpBQoJaI6FSk,6198 -celery/tests/backends/test_mongodb.py,sha256=xGbVOXl7Jfzpi1nYoVAw3RGRH-l89HYbejMS04-i8SM,14247 -celery/tests/backends/test_redis.py,sha256=uVPsHdOU14GSPZPLA6SY2JUUo79GltfUFVy1Olfc7fM,8655 -celery/tests/backends/test_rpc.py,sha256=iQBb0efYHvSSppUc6IaK2L-Jbr_Utk2iUpOoT8AzfYI,2317 -celery/tests/bin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/bin/test_amqp.py,sha256=paYj2ZdtfeodT9zmrwL8Pbmk2aCUhkGnAgbnEmrQZ6k,4721 -celery/tests/bin/test_base.py,sha256=8EXItbrOQT1L-bKP0fxjiwkkEjEMiApqBJrLw0xqbIc,11301 -celery/tests/bin/test_beat.py,sha256=QvTecx2yqc-e0KrQjqAXB3aISc999IHc__I10s6yOJc,5464 -celery/tests/bin/test_celery.py,sha256=CrMMqM3duzFMCt1xPHDf7GNpp7-9otCJFiN2R4HVI3U,18700 -celery/tests/bin/test_celeryd_detach.py,sha256=TchgSUR8vDB8OqRF6VpnYMKktpGrgZIQLXJhShWLcpE,4000 -celery/tests/bin/test_celeryevdump.py,sha256=1ImmCOndSESTVvARls0Wjngvd86NFp4WCF9r32OI8HI,2231 -celery/tests/bin/test_events.py,sha256=HYPiQJcFumiSHwtMnXO8dcURW2eNknyTCoSwpOWhm1w,2435 -celery/tests/bin/test_multi.py,sha256=MVGxbabKXDPgAmdME3K8zSmZ9bTjKkMviBCP0RHoum4,16477 -celery/tests/bin/test_worker.py,sha256=9LJJrDjzRQzM7LAPbEF0sK5mxLj8Xpjso9chODgJiQs,23503 -celery/tests/bin/proj/__init__.py,sha256=Q9qt46aWx0dx_SFfyigaH4goU1_ea01l7T4dhpDEeSs,104 -celery/tests/bin/proj/app.py,sha256=ZpUV5lnfIiYBH1jMsap--ZQbX9YWk-zEO_2RTwI7lYE,102 -celery/tests/compat_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/compat_modules/test_compat.py,sha256=q840-7jXVOMxpB5qS-5Pv99pZXPEeDMx15__SJVEHMI,2376 -celery/tests/compat_modules/test_compat_utils.py,sha256=0GQ1cxCiK8k4qOzvanBSSYLawO1vFEdmJaDAPz0AfCQ,1457 -celery/tests/compat_modules/test_decorators.py,sha256=KS7ghG-RYiibnK4JcGZX_r-d9RsRAhKitLXA72WzsGA,1066 -celery/tests/compat_modules/test_http.py,sha256=q1IaC7oUY9CEPUQga8t6RoMGbQQxBCGC3gODskqW3LU,5008 -celery/tests/compat_modules/test_messaging.py,sha256=XsQIR6vdtnfCpcPchGJUND1d6t6Mi7Cqjo0yJ3TY0zQ,357 -celery/tests/compat_modules/test_sets.py,sha256=h5yzbwuLtVqQHeY7INq9nmERApnhwWs1EbrfP8Lbkh8,7630 -celery/tests/concurrency/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/concurrency/test_concurrency.py,sha256=saYW1_SeBdRJTUwx_9wtNpZXslDJQCQsfcmoNS2BIZ4,3163 -celery/tests/concurrency/test_eventlet.py,sha256=hWsEQlZbSqQoPfjBM8xDq7ZeRJ-UJePxj8xlrmJ96dQ,3303 -celery/tests/concurrency/test_gevent.py,sha256=n8WCZO9JLTPOjVajRKPlaHI_qPRC6tr3DgVPO_3lZ20,4309 -celery/tests/concurrency/test_pool.py,sha256=nKgYR3rHtsuqcxKSGqC_tMF2glqIiecDZMEGG1bYCK4,2326 -celery/tests/concurrency/test_prefork.py,sha256=lSfo-sVt_f6rPjQNNV7hQ1wNGghM5SWwztO_ubcbx58,8490 -celery/tests/concurrency/test_solo.py,sha256=sljYxvp-oY4wSHftFOwXR5jSDCBZvmu_AswysJfeDSg,531 -celery/tests/concurrency/test_threads.py,sha256=8PkYbDDxdiPe3vWvKs3kQoEhPEV4MEVMoapeUQcooAY,1861 -celery/tests/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/contrib/test_abortable.py,sha256=61ys7MX9IgVZ5KsYMZKLoaLxovRcMQL8kz7DK8GHEWg,1513 -celery/tests/contrib/test_methods.py,sha256=_xxqh_QobP8kP_Y0YS-GvYGIFLp6L-aeL8qeSles4DQ,840 -celery/tests/contrib/test_migrate.py,sha256=tHMo0uQ-htzmIv9WBC0-KdLZeLk-75CKqLX2uFLn46Y,11182 
-celery/tests/contrib/test_rdb.py,sha256=ubWjYB-0hzPXqVtAyeLw99a4DpdAGBmade9Fh70tKbU,3093 -celery/tests/events/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/events/test_cursesmon.py,sha256=iK8iwm8MtIVUiiWKbzW4WrWdCVX3hBPb4yAwYIrWetM,2653 -celery/tests/events/test_events.py,sha256=hKE-0cIMG8H1_91H9i2fB430J7ii-H2WzTS3q51cdis,8527 -celery/tests/events/test_snapshot.py,sha256=WeTY_uUeKNVSTxVtvAO2xYmftYlwA8uivd2KsmeNWjk,3734 -celery/tests/events/test_state.py,sha256=6exI3OaJ3eMCSYt1_gCgBTzYZ_6lVfm2SjSyVK09V90,18838 -celery/tests/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/fixups/test_django.py,sha256=LMJEHFjXpS2AY9J9lM03vxh9QOay15HUWj1s7hEAGps,11892 -celery/tests/functional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/functional/case.py,sha256=hJGE0oy6ABIuBc3osBPQid7KwaKefe8WvsuIrKQkkwg,5599 -celery/tests/functional/tasks.py,sha256=nfDlTt_urjMpu-6ug4KWD5BieWFdxQbkhEVPepfEE_8,341 -celery/tests/security/__init__.py,sha256=ivc_G0iCuqZ1bbKyEABXdcH6X_nXZLIq5MlYgCP6z-A,3623 -celery/tests/security/case.py,sha256=YQ_4RTsCEkPxamivvitHvqsgbkStx-13ma00dwG2MMQ,403 -celery/tests/security/test_certificate.py,sha256=IADR90BtZUo9wOTX_K6QIHFB3qMqALatGnWaB90cfBA,2633 -celery/tests/security/test_key.py,sha256=xMmVbUbB4TzVUq8XZRS2jjuv6hu0AwUXrum-PLTIDqM,845 -celery/tests/security/test_security.py,sha256=QR7KlWiukB0sFtjLVhJDFzQBBWwbMshbzG6jef_RPFI,3845 -celery/tests/security/test_serialization.py,sha256=o0i-FO22l8kbJNSf8ajSg9cIE_oXH3QpECWfwA2bv1k,2252 -celery/tests/slow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/tasks/test_canvas.py,sha256=Zu0iO8JxUajNzcOcpgo_kYoTplHS5eI1CgulBSypkaU,10664 -celery/tests/tasks/test_chord.py,sha256=jHLjW-4QwCEkag7uwhnvTcFB3-gdbFpIm0dal_QQO8w,7007 -celery/tests/tasks/test_context.py,sha256=o89z1fvYROuiIYM_HW3DpFaWz6y8-dIze2TSc2UmXoA,2546 -celery/tests/tasks/test_result.py,sha256=aMOqbAaf6SgtrNBwIWbjDC7pDFcNy0nWzabQIiuHHuo,24135 -celery/tests/tasks/test_states.py,sha256=z2OV113N4EPS33AZu3chN3XGEbPIrKmYa699gdIFHI4,1317 -celery/tests/tasks/test_tasks.py,sha256=CIF1MhneGOIUvUelwcBD7j6hUSDevgBVEQd7i6ral5I,15806 -celery/tests/tasks/test_trace.py,sha256=T8ZyKBfccSNTzmXc8_FyJURBO-kTaikijPLOBLDBVXU,6770 -celery/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/utils/test_datastructures.py,sha256=b1nsrozGQTPMVgS5OaT6RrBQNOQgV5DDksadFIi97qc,10629 -celery/tests/utils/test_dispatcher.py,sha256=sZMai1M6cufvaXUcDnD4lLVMUdWM6txOAYsvNq-EDqg,3873 -celery/tests/utils/test_encoding.py,sha256=Lk5BF_Sr8QfcBndp5ldvzmVUwNBA1p_LjKo3t1rGk8c,526 -celery/tests/utils/test_functional.py,sha256=riIDlFNhFfmGa8VH04EEhE2HCUtvlF-ID6epYjLeb5A,5074 -celery/tests/utils/test_imports.py,sha256=rZ-Cjt1SYEvVO7SToxTk5bVmS0yW9Qnt754qX2PGeP0,1284 -celery/tests/utils/test_local.py,sha256=zmP1lZbgmMgFauUeVtEr5maQXWguS6LUxDExXTzSrIk,9755 -celery/tests/utils/test_mail.py,sha256=GJLoH4UAjxNWdFP-vBagjzGQnwuUvtRr45gSF8WXmLY,1594 -celery/tests/utils/test_pickle.py,sha256=j1RuTZJLLw79cquX0rpVy-6BHULvF8Jf0iwF7jOPVVk,1572 -celery/tests/utils/test_platforms.py,sha256=PYJPbu5xl22Ikit7h6Bik82xzDGxFQ8BhzmRWIyHcXU,23906 -celery/tests/utils/test_saferef.py,sha256=sGvHI0iGfpN2p83SaDABRTrHuHNfg2fpFUlbWHpRNis,2050 -celery/tests/utils/test_serialization.py,sha256=wiQPcEhVdNPpKqIIG0akHJ1HADDKGGTm45r5f36LzAQ,1129 
-celery/tests/utils/test_sysinfo.py,sha256=wJpb59DawWxJ1ol00RDV1ML_kS-3475amczYgtbnj6Q,909 -celery/tests/utils/test_term.py,sha256=9UdtJKag7NOAaryRoTN_xzoE0SYcDGOdf4S9Dfh62Ww,2633 -celery/tests/utils/test_text.py,sha256=0vomEwnuw0hbA-081xFZso1X8uQ0bx1sDx5lxBDWD3w,2179 -celery/tests/utils/test_threads.py,sha256=RFIaXkJ0TdyXzoGAnHg9t7QhEIEMe44cSFrxYp-gDgA,2666 -celery/tests/utils/test_timer2.py,sha256=z3mxGq3WcpTXe2mwlfHGMj_HkVsFu9YyDkrhA2Wo_s0,5099 -celery/tests/utils/test_timeutils.py,sha256=u_8BEOt04m21JPCjm71nnbvFpEsIxGRQt6aDV_BPieM,8405 -celery/tests/utils/test_utils.py,sha256=GKEN-d4kK0NxSdAn-nnN_WQlJGOqx4RR4tarRTX26ss,2812 -celery/tests/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -celery/tests/worker/test_autoreload.py,sha256=5Vs727vOhs6WCMwYe4RMQcjaTIVO-hPFxXdD5Ka2a0M,9877 -celery/tests/worker/test_autoscale.py,sha256=LNAObLqwy79pF0xLIWR36dk6VtL5Rq0uOT7oLQW3ZD4,5900 -celery/tests/worker/test_bootsteps.py,sha256=8QEzvNayK7oCjCAaX005-fvn11PK4_VXtr7EkSaXaok,9132 -celery/tests/worker/test_components.py,sha256=_0k_lYjst-zh5bwy-GlPMFgaGUsiZdeyu4ycUEnM8b0,920 -celery/tests/worker/test_consumer.py,sha256=8B3WloJo3sY2pzCkMUPZHg7R5u8rNihaS1VGeleLajo,16490 -celery/tests/worker/test_control.py,sha256=iY6BEvjy4jDk5sy7RTgpatz7ZzaJK-JrvF-EgiNrk1Q,21324 -celery/tests/worker/test_heartbeat.py,sha256=AoLPVZdyBZO6-F3JPdMRPC2O1hAYszFIFDPR3-4L3C8,1678 -celery/tests/worker/test_hub.py,sha256=iOPrKj-LN0Ts_OAhaljpjBq5XhYU_KtY51dZq8zGiIM,9735 -celery/tests/worker/test_loops.py,sha256=DMC4xqBQhuRICNwUhPXngM6avUDootuY7LxtKhZ5SAE,14533 -celery/tests/worker/test_request.py,sha256=KEEoQoGkUV81W9BmkOahMIepuJpTGvnsTreFAxrI1-g,31467 -celery/tests/worker/test_revoke.py,sha256=v9ZEOEspe565G8eRAup17o5cXA2BDRiiwxpPgGRDNRo,336 -celery/tests/worker/test_state.py,sha256=x7vtdk05Z44KQiwJOJTPHvebKMDCNy4ErY6_j4suFNs,4595 -celery/tests/worker/test_strategy.py,sha256=NIMURR2DosEY21Jx0KBk3Rz4fpYcuLZ4doFpsUqzFjc,4624 -celery/tests/worker/test_worker.py,sha256=9IcP8_WT4ujLSPL-v5MGp4fwUpUAjLHISJNBM77tzcs,38397 -celery/utils/__init__.py,sha256=kkA4rLGtWwH9m8-kjDxh6pfgf0SGYO-yBag-vrsUEBs,12713 -celery/utils/compat.py,sha256=oV2FXmhw_Yq7ub_RWl-XRZBJmd6xMpdrpaeIXvPgFt8,34 -celery/utils/debug.py,sha256=GihMTBeKlKYs-0lr3f2TXq1lgBh4CC-VhZsO-zkCQ98,3751 -celery/utils/encoding.py,sha256=yh10Ml0TsdibU3EGbd2lvDTpNvxtD6yN_2o8LI7sEno,361 -celery/utils/functional.py,sha256=C9CsNmx_VyB3U2Zwc83eIkDAD50dJN6ayWDukUK9b60,8814 -celery/utils/imports.py,sha256=oSzhVyyt9DZs2KtLqrkOOMwsOePPC_A6h7LeaZsoxJw,2914 -celery/utils/iso8601.py,sha256=zA4OeMDxKGzNEV6aFOWAZzpix7i6VUJms1vabPyx0B8,2738 -celery/utils/log.py,sha256=UYSFLqkxKNXpBbhfY9kZGn4jOVyKrfld-SmDiY2nYOQ,9292 -celery/utils/mail.py,sha256=rnhrwfJXl5cP_KOtcPWypAhBihnm0Fa5U7Xep36QqZ0,4944 -celery/utils/objects.py,sha256=grHN_y3LnktQPQI8eTw9vBwR6KcPBT-BRUL2VJHr6w4,2762 -celery/utils/serialization.py,sha256=Wgo-K628_x1dJTeClG5TWJbKxxfiQrAkEUvE41nRX5s,4869 -celery/utils/sysinfo.py,sha256=zlQAlqJgIt0SGG8AnIYvQRiy0yK9D2cC_RtmJpPz0Ac,993 -celery/utils/term.py,sha256=zBgNYbw86wuLvmEHG18-wXycmgqNiPxQ8bNVWt5bpk4,3927 -celery/utils/text.py,sha256=r5j7bXZr6gAnzr_TGfRT5Lp2OgHi6mPOu8lTmIq8_ss,2020 -celery/utils/threads.py,sha256=Ef1d7pj1loMilftUPqtbGhcQe1NoHPFlbtMHsqd-u04,9636 -celery/utils/timer2.py,sha256=zj3p0jH7lxpmWUAAaCS1EH6ubWp1m3vmyRWd8fCV6CA,4236 -celery/utils/timeutils.py,sha256=VcSgnUv9SmBq7Pcf6YdumLDcSlSpQt1U-Higr-NG0IA,11193 -celery/utils/dispatch/__init__.py,sha256=o1QuagJss6zaoNcLLotHHs94Eii7e4VNqu8j2Zj58y0,113 
-celery/utils/dispatch/saferef.py,sha256=E2OXv4ViRbDh8zkQLerQwOeMfNkPmpu1HmxlmSScJbs,10894 -celery/utils/dispatch/signal.py,sha256=1K6bmvN7QdhSyfLwxflTmsxIQrpSirix5bxjjLTE4D0,8343 -celery/worker/__init__.py,sha256=vFwzEd6pUJTu1joU9OL_FIPY6DG4KNcrXJyuJRuGnPw,13641 -celery/worker/autoreload.py,sha256=svnUXyQqm2QlKBiUJWGJS51DcmedEtQgzKp7HYEuf0E,8868 -celery/worker/autoscale.py,sha256=e6iN0hq6FlOvsA9MmIulWySZxiRQNVAc9_ILZtLWetc,4864 -celery/worker/components.py,sha256=I3RmLjA7f0bQ8mFrSpLNH9s-j9Gg0sBptZx7wG9X3ro,7580 -celery/worker/consumer.py,sha256=AGmtw7dHAPHYmx1DLy3R2GbLJa30KXHoaMrLKmwLrzk,29347 -celery/worker/control.py,sha256=6IfSRbMSB7R9yXUGlR4sdkoJderRvKh-uty8tqclejM,11410 -celery/worker/heartbeat.py,sha256=NAM8Bq7ywHabXclltgrnCQb6FbnBh3sLPEveycNP3sk,1737 -celery/worker/job.py,sha256=bmkKSVd5BuHcGdQ_gn3MJeeLkx_-iBvKTRTImLrtBYI,21054 -celery/worker/loops.py,sha256=uAQDdHg-hAo7RvgqVMhgvOkDqmAkJDVGj4FgQNzObAc,3420 -celery/worker/pidbox.py,sha256=wfephMpB1gJu0f0WPUFrsTSPQjSGvwp3FCJNTcPtHzo,3447 -celery/worker/request.py,sha256=twDXCdrvS7T0KAlknT_XubTINPeoXyuqVPNnSsEqQgM,18826 -celery/worker/state.py,sha256=i2DbvX5v483Lyf_VUUKbzp7oMCHSvq5sMbb3A3G1zx4,6791 -celery/worker/strategy.py,sha256=TlObf6FkTTEeGF0FTbkkmh5Axl-IXiNxHZG0ec6C_DQ,3087 -celery-3.1.26.post2.dist-info/DESCRIPTION.rst,sha256=ZjBRr8syYhEecvIb6tx8S15p0Lgv7cWd3DpkJxw8gFs,11599 -celery-3.1.26.post2.dist-info/METADATA,sha256=0QZInn5VoWtzmvqD8gOQYch2rELrfOeA4v5baTqLnT8,14355 -celery-3.1.26.post2.dist-info/RECORD,, -celery-3.1.26.post2.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 -celery-3.1.26.post2.dist-info/entry_points.txt,sha256=Cx6fgw30zDMkid9S17TYinhJwJHG5MjMfwZNGqDsTb4,178 -celery-3.1.26.post2.dist-info/metadata.json,sha256=wI1gtk7Xfkv36kqvqr7aIy34p86b3R_XDTsh-eJd3IA,3169 -celery-3.1.26.post2.dist-info/top_level.txt,sha256=sQQ-a5HNsZIi2A8DiKQnB1HODFMfmrzIAZIE8t_XiOA,7 -../../../bin/celery,sha256=reolwO892Sx1ruHQnX6Gb7v-Su0tWTjipUH7c7xDZQc,246 -../../../bin/celerybeat,sha256=goFpTFIXyk1hqyNFRA1KfbG61c9lJLp1wSo2pRe3mnU,262 -../../../bin/celeryd,sha256=tl_DPKb1fRWEd_McTOvrwTdSgYw3U4PtFFRb9UnrFFs,266 -../../../bin/celeryd-multi,sha256=Ktk0eE1NxFhtnA9MWP_AberKfyVK307SoM2SCVhQHto,264 -celery-3.1.26.post2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -celery/app/__pycache__/amqp.cpython-36.pyc,, -celery/app/__pycache__/annotations.cpython-36.pyc,, -celery/app/__pycache__/task.cpython-36.pyc,, -celery/app/__pycache__/control.cpython-36.pyc,, -celery/app/__pycache__/trace.cpython-36.pyc,, -celery/app/__pycache__/builtins.cpython-36.pyc,, -celery/app/__pycache__/base.cpython-36.pyc,, -celery/app/__pycache__/log.cpython-36.pyc,, -celery/app/__pycache__/defaults.cpython-36.pyc,, -celery/app/__pycache__/registry.cpython-36.pyc,, -celery/app/__pycache__/utils.cpython-36.pyc,, -celery/app/__pycache__/routes.cpython-36.pyc,, -celery/app/__pycache__/__init__.cpython-36.pyc,, -celery/bin/__pycache__/amqp.cpython-36.pyc,, -celery/bin/__pycache__/beat.cpython-36.pyc,, -celery/bin/__pycache__/celeryd_detach.cpython-36.pyc,, -celery/bin/__pycache__/multi.cpython-36.pyc,, -celery/bin/__pycache__/base.cpython-36.pyc,, -celery/bin/__pycache__/celery.cpython-36.pyc,, -celery/bin/__pycache__/__init__.cpython-36.pyc,, -celery/bin/__pycache__/worker.cpython-36.pyc,, -celery/bin/__pycache__/graph.cpython-36.pyc,, -celery/bin/__pycache__/events.cpython-36.pyc,, -celery/security/__pycache__/key.cpython-36.pyc,, -celery/security/__pycache__/certificate.cpython-36.pyc,, 
-celery/security/__pycache__/utils.cpython-36.pyc,, -celery/security/__pycache__/serialization.cpython-36.pyc,, -celery/security/__pycache__/__init__.cpython-36.pyc,, -celery/backends/database/__pycache__/models.cpython-36.pyc,, -celery/backends/database/__pycache__/session.cpython-36.pyc,, -celery/backends/database/__pycache__/__init__.cpython-36.pyc,, -celery/backends/__pycache__/amqp.cpython-36.pyc,, -celery/backends/__pycache__/cassandra.cpython-36.pyc,, -celery/backends/__pycache__/rpc.cpython-36.pyc,, -celery/backends/__pycache__/base.cpython-36.pyc,, -celery/backends/__pycache__/couchbase.cpython-36.pyc,, -celery/backends/__pycache__/cache.cpython-36.pyc,, -celery/backends/__pycache__/mongodb.cpython-36.pyc,, -celery/backends/__pycache__/__init__.cpython-36.pyc,, -celery/backends/__pycache__/redis.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_chord.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_states.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_tasks.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_canvas.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_context.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_result.cpython-36.pyc,, -celery/tests/tasks/__pycache__/__init__.cpython-36.pyc,, -celery/tests/tasks/__pycache__/test_trace.cpython-36.pyc,, -celery/tests/app/__pycache__/test_defaults.cpython-36.pyc,, -celery/tests/app/__pycache__/test_registry.cpython-36.pyc,, -celery/tests/app/__pycache__/test_loaders.cpython-36.pyc,, -celery/tests/app/__pycache__/test_builtins.cpython-36.pyc,, -celery/tests/app/__pycache__/test_log.cpython-36.pyc,, -celery/tests/app/__pycache__/test_utils.cpython-36.pyc,, -celery/tests/app/__pycache__/test_control.cpython-36.pyc,, -celery/tests/app/__pycache__/test_celery.cpython-36.pyc,, -celery/tests/app/__pycache__/test_routes.cpython-36.pyc,, -celery/tests/app/__pycache__/test_annotations.cpython-36.pyc,, -celery/tests/app/__pycache__/test_exceptions.cpython-36.pyc,, -celery/tests/app/__pycache__/test_beat.cpython-36.pyc,, -celery/tests/app/__pycache__/test_amqp.cpython-36.pyc,, -celery/tests/app/__pycache__/test_app.cpython-36.pyc,, -celery/tests/app/__pycache__/test_schedules.cpython-36.pyc,, -celery/tests/app/__pycache__/__init__.cpython-36.pyc,, -celery/tests/bin/proj/__pycache__/app.cpython-36.pyc,, -celery/tests/bin/proj/__pycache__/__init__.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_worker.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_events.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_base.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_celery.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_celeryevdump.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_multi.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_beat.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_amqp.cpython-36.pyc,, -celery/tests/bin/__pycache__/test_celeryd_detach.cpython-36.pyc,, -celery/tests/bin/__pycache__/__init__.cpython-36.pyc,, -celery/tests/security/__pycache__/test_security.cpython-36.pyc,, -celery/tests/security/__pycache__/test_certificate.cpython-36.pyc,, -celery/tests/security/__pycache__/test_serialization.cpython-36.pyc,, -celery/tests/security/__pycache__/test_key.cpython-36.pyc,, -celery/tests/security/__pycache__/case.cpython-36.pyc,, -celery/tests/security/__pycache__/__init__.cpython-36.pyc,, -celery/tests/slow/__pycache__/__init__.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_cache.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_backends.cpython-36.pyc,, 
-celery/tests/backends/__pycache__/test_base.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_redis.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_couchbase.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_mongodb.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_database.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_amqp.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_rpc.cpython-36.pyc,, -celery/tests/backends/__pycache__/__init__.cpython-36.pyc,, -celery/tests/backends/__pycache__/test_cassandra.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_decorators.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_sets.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_http.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_compat.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_messaging.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/test_compat_utils.cpython-36.pyc,, -celery/tests/compat_modules/__pycache__/__init__.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_pickle.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_local.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_utils.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_imports.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_sysinfo.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_platforms.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_serialization.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_saferef.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_timeutils.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_text.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_datastructures.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_encoding.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_timer2.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_term.cpython-36.pyc,, -celery/tests/utils/__pycache__/__init__.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_dispatcher.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_mail.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_functional.cpython-36.pyc,, -celery/tests/utils/__pycache__/test_threads.cpython-36.pyc,, -celery/tests/__pycache__/case.cpython-36.pyc,, -celery/tests/__pycache__/__init__.cpython-36.pyc,, -celery/tests/contrib/__pycache__/test_migrate.cpython-36.pyc,, -celery/tests/contrib/__pycache__/test_rdb.cpython-36.pyc,, -celery/tests/contrib/__pycache__/test_abortable.cpython-36.pyc,, -celery/tests/contrib/__pycache__/test_methods.cpython-36.pyc,, -celery/tests/contrib/__pycache__/__init__.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_solo.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_gevent.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_concurrency.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_eventlet.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_pool.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/__init__.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_prefork.cpython-36.pyc,, -celery/tests/concurrency/__pycache__/test_threads.cpython-36.pyc,, -celery/tests/fixups/__pycache__/test_django.cpython-36.pyc,, -celery/tests/fixups/__pycache__/__init__.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_consumer.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_request.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_autoreload.cpython-36.pyc,, 
-celery/tests/worker/__pycache__/test_worker.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_control.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_components.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_revoke.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_hub.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_bootsteps.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_autoscale.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_state.cpython-36.pyc,, -celery/tests/worker/__pycache__/__init__.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_heartbeat.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_loops.cpython-36.pyc,, -celery/tests/worker/__pycache__/test_strategy.cpython-36.pyc,, -celery/tests/events/__pycache__/test_cursesmon.cpython-36.pyc,, -celery/tests/events/__pycache__/test_events.cpython-36.pyc,, -celery/tests/events/__pycache__/test_snapshot.cpython-36.pyc,, -celery/tests/events/__pycache__/test_state.cpython-36.pyc,, -celery/tests/events/__pycache__/__init__.cpython-36.pyc,, -celery/tests/functional/__pycache__/tasks.cpython-36.pyc,, -celery/tests/functional/__pycache__/case.cpython-36.pyc,, -celery/tests/functional/__pycache__/__init__.cpython-36.pyc,, -celery/utils/dispatch/__pycache__/saferef.cpython-36.pyc,, -celery/utils/dispatch/__pycache__/signal.cpython-36.pyc,, -celery/utils/dispatch/__pycache__/__init__.cpython-36.pyc,, -celery/utils/__pycache__/timer2.cpython-36.pyc,, -celery/utils/__pycache__/debug.cpython-36.pyc,, -celery/utils/__pycache__/sysinfo.cpython-36.pyc,, -celery/utils/__pycache__/term.cpython-36.pyc,, -celery/utils/__pycache__/imports.cpython-36.pyc,, -celery/utils/__pycache__/mail.cpython-36.pyc,, -celery/utils/__pycache__/functional.cpython-36.pyc,, -celery/utils/__pycache__/timeutils.cpython-36.pyc,, -celery/utils/__pycache__/objects.cpython-36.pyc,, -celery/utils/__pycache__/text.cpython-36.pyc,, -celery/utils/__pycache__/encoding.cpython-36.pyc,, -celery/utils/__pycache__/compat.cpython-36.pyc,, -celery/utils/__pycache__/log.cpython-36.pyc,, -celery/utils/__pycache__/threads.cpython-36.pyc,, -celery/utils/__pycache__/iso8601.cpython-36.pyc,, -celery/utils/__pycache__/serialization.cpython-36.pyc,, -celery/utils/__pycache__/__init__.cpython-36.pyc,, -celery/__pycache__/beat.cpython-36.pyc,, -celery/__pycache__/schedules.cpython-36.pyc,, -celery/__pycache__/exceptions.cpython-36.pyc,, -celery/__pycache__/datastructures.cpython-36.pyc,, -celery/__pycache__/result.cpython-36.pyc,, -celery/__pycache__/signals.cpython-36.pyc,, -celery/__pycache__/_state.cpython-36.pyc,, -celery/__pycache__/__main__.cpython-36.pyc,, -celery/__pycache__/canvas.cpython-36.pyc,, -celery/__pycache__/five.cpython-36.pyc,, -celery/__pycache__/local.cpython-36.pyc,, -celery/__pycache__/bootsteps.cpython-36.pyc,, -celery/__pycache__/platforms.cpython-36.pyc,, -celery/__pycache__/states.cpython-36.pyc,, -celery/__pycache__/__init__.cpython-36.pyc,, -celery/contrib/__pycache__/rdb.cpython-36.pyc,, -celery/contrib/__pycache__/migrate.cpython-36.pyc,, -celery/contrib/__pycache__/abortable.cpython-36.pyc,, -celery/contrib/__pycache__/batches.cpython-36.pyc,, -celery/contrib/__pycache__/methods.cpython-36.pyc,, -celery/contrib/__pycache__/sphinx.cpython-36.pyc,, -celery/contrib/__pycache__/__init__.cpython-36.pyc,, -celery/concurrency/__pycache__/asynpool.cpython-36.pyc,, -celery/concurrency/__pycache__/gevent.cpython-36.pyc,, -celery/concurrency/__pycache__/base.cpython-36.pyc,, 
-celery/concurrency/__pycache__/threads.cpython-36.pyc,, -celery/concurrency/__pycache__/prefork.cpython-36.pyc,, -celery/concurrency/__pycache__/eventlet.cpython-36.pyc,, -celery/concurrency/__pycache__/__init__.cpython-36.pyc,, -celery/concurrency/__pycache__/solo.cpython-36.pyc,, -celery/task/__pycache__/trace.cpython-36.pyc,, -celery/task/__pycache__/sets.cpython-36.pyc,, -celery/task/__pycache__/base.cpython-36.pyc,, -celery/task/__pycache__/http.cpython-36.pyc,, -celery/task/__pycache__/__init__.cpython-36.pyc,, -celery/fixups/__pycache__/django.cpython-36.pyc,, -celery/fixups/__pycache__/__init__.cpython-36.pyc,, -celery/worker/__pycache__/heartbeat.cpython-36.pyc,, -celery/worker/__pycache__/autoscale.cpython-36.pyc,, -celery/worker/__pycache__/strategy.cpython-36.pyc,, -celery/worker/__pycache__/request.cpython-36.pyc,, -celery/worker/__pycache__/job.cpython-36.pyc,, -celery/worker/__pycache__/state.cpython-36.pyc,, -celery/worker/__pycache__/control.cpython-36.pyc,, -celery/worker/__pycache__/pidbox.cpython-36.pyc,, -celery/worker/__pycache__/loops.cpython-36.pyc,, -celery/worker/__pycache__/components.cpython-36.pyc,, -celery/worker/__pycache__/consumer.cpython-36.pyc,, -celery/worker/__pycache__/autoreload.cpython-36.pyc,, -celery/worker/__pycache__/__init__.cpython-36.pyc,, -celery/events/__pycache__/state.cpython-36.pyc,, -celery/events/__pycache__/cursesmon.cpython-36.pyc,, -celery/events/__pycache__/__init__.cpython-36.pyc,, -celery/events/__pycache__/dumper.cpython-36.pyc,, -celery/events/__pycache__/snapshot.cpython-36.pyc,, -celery/apps/__pycache__/beat.cpython-36.pyc,, -celery/apps/__pycache__/__init__.cpython-36.pyc,, -celery/apps/__pycache__/worker.cpython-36.pyc,, -celery/loaders/__pycache__/app.cpython-36.pyc,, -celery/loaders/__pycache__/default.cpython-36.pyc,, -celery/loaders/__pycache__/base.cpython-36.pyc,, -celery/loaders/__pycache__/__init__.cpython-36.pyc,, diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt deleted file mode 100644 index 26ac737..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt +++ /dev/null @@ -1,6 +0,0 @@ -[console_scripts] -celery = celery.__main__:main -celerybeat = celery.__main__:_compat_beat -celeryd = celery.__main__:_compat_worker -celeryd-multi = celery.__main__:_compat_multi - diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json deleted file mode 100644 index 5506506..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Topic :: System :: Distributed Computing", "Topic :: Software Development :: Object Brokering", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Jython", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System 
:: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X"], "description_content_type": "UNKNOWN", "extensions": {"python.commands": {"wrap_console": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}, "python.details": {"contacts": [{"email": "ask@celeryproject.org", "name": "Ask Solem", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://celeryproject.org"}}, "python.exports": {"console_scripts": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}}, "extras": ["auth", "beanstalk", "cassandra", "couchbase", "couchdb", "eventlet", "gevent", "librabbitmq", "memcache", "mongodb", "msgpack", "pyro", "redis", "slmq", "sqlalchemy", "sqs", "threads", "yaml", "zeromq", "zookeeper"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "celery", "platform": "any", "run_requires": [{"extra": "yaml", "requires": ["PyYAML (>=3.10)"]}, {"extra": "beanstalk", "requires": ["beanstalkc"]}, {"requires": ["billiard (<3.4,>=3.3.0.23)", "kombu (<3.1,>=3.0.37)", "pytz (>dev)"]}, {"extra": "sqs", "requires": ["boto (>=2.13.3)"]}, {"extra": "couchbase", "requires": ["couchbase"]}, {"extra": "couchdb", "requires": ["couchdb"]}, {"extra": "eventlet", "requires": ["eventlet"]}, {"extra": "gevent", "requires": ["gevent"]}, {"extra": "zookeeper", "requires": ["kazoo (>=1.3.1)"]}, {"extra": "librabbitmq", "requires": ["librabbitmq (>=1.6.1)"]}, {"extra": "msgpack", "requires": ["msgpack-python (>=0.3.0)"]}, {"extra": "auth", "requires": ["pyOpenSSL"]}, {"extra": "cassandra", "requires": ["pycassa"]}, {"extra": "memcache", "requires": ["pylibmc"]}, {"extra": "mongodb", "requires": ["pymongo (>=2.6.2)"]}, {"extra": "pyro", "requires": ["pyro4"]}, {"extra": "zeromq", "requires": ["pyzmq (>=13.1.0)"]}, {"extra": "redis", "requires": ["redis (>=2.8.0)"]}, {"extra": "slmq", "requires": ["softlayer-messaging (>=1.0.3)"]}, {"extra": "sqlalchemy", "requires": ["sqlalchemy"]}, {"extra": "threads", "requires": ["threadpool"]}], "summary": "Distributed Task Queue", "test_requires": [{"requires": ["mock (>=1.0.1)", "nose", "unittest2 (>=0.5.1)"]}], "version": "3.1.26.post2"} \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt deleted file mode 100644 index 74f9e8f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -celery diff --git a/thesisenv/lib/python3.6/site-packages/celery/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/__init__.py deleted file mode 100644 index ba5f057..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/__init__.py +++ /dev/null @@ -1,155 +0,0 @@ -# -*- coding: utf-8 -*- -"""Distributed Task Queue""" -# :copyright: (c) 2015 Ask Solem and individual contributors. -# All rights # reserved. -# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved. -# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors, -# All rights reserved. -# :license: BSD (3 Clause), see LICENSE for more details. 
- -from __future__ import absolute_import - -import os -import sys - -from collections import namedtuple - -version_info_t = namedtuple( - 'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'), -) - -SERIES = 'Cipater' -VERSION = version_info_t(3, 1, 26, '.post2', '') -__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION) -__author__ = 'Ask Solem' -__contact__ = 'ask@celeryproject.org' -__homepage__ = 'http://celeryproject.org' -__docformat__ = 'restructuredtext' -__all__ = [ - 'Celery', 'bugreport', 'shared_task', 'task', - 'current_app', 'current_task', 'maybe_signature', - 'chain', 'chord', 'chunks', 'group', 'signature', - 'xmap', 'xstarmap', 'uuid', 'version', '__version__', -] -VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES) - -# -eof meta- - -if os.environ.get('C_IMPDEBUG'): # pragma: no cover - from .five import builtins - real_import = builtins.__import__ - - def debug_import(name, locals=None, globals=None, - fromlist=None, level=-1): - glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals - importer_name = glob and glob.get('__name__') or 'unknown' - print('-- {0} imports {1}'.format(importer_name, name)) - return real_import(name, locals, globals, fromlist, level) - builtins.__import__ = debug_import - -# This is never executed, but tricks static analyzers (PyDev, PyCharm, -# pylint, etc.) into knowing the types of these symbols, and what -# they contain. -STATICA_HACK = True -globals()['kcah_acitats'[::-1].upper()] = False -if STATICA_HACK: # pragma: no cover - from celery.app import shared_task # noqa - from celery.app.base import Celery # noqa - from celery.app.utils import bugreport # noqa - from celery.app.task import Task # noqa - from celery._state import current_app, current_task # noqa - from celery.canvas import ( # noqa - chain, chord, chunks, group, - signature, maybe_signature, xmap, xstarmap, subtask, - ) - from celery.utils import uuid # noqa - -# Eventlet/gevent patching must happen before importing -# anything else, so these tools must be at top-level. - - -def _find_option_with_arg(argv, short_opts=None, long_opts=None): - """Search argv for option specifying its short and longopt - alternatives. - - Return the value of the option if found. - - """ - for i, arg in enumerate(argv): - if arg.startswith('-'): - if long_opts and arg.startswith('--'): - name, _, val = arg.partition('=') - if name in long_opts: - return val - if short_opts and arg in short_opts: - return argv[i + 1] - raise KeyError('|'.join(short_opts or [] + long_opts or [])) - - -def _patch_eventlet(): - import eventlet - import eventlet.debug - eventlet.monkey_patch() - EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0)) - if EVENTLET_DBLOCK: - eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK) - - -def _patch_gevent(): - from gevent import monkey, version_info - monkey.patch_all() - if version_info[0] == 0: # pragma: no cover - # Signals aren't working in gevent versions <1.0, - # and are not monkey patched by patch_all() - from gevent import signal as _gevent_signal - _signal = __import__('signal') - _signal.signal = _gevent_signal - - -def maybe_patch_concurrency(argv=sys.argv, - short_opts=['-P'], long_opts=['--pool'], - patches={'eventlet': _patch_eventlet, - 'gevent': _patch_gevent}): - """With short and long opt alternatives that specify the command line - option to set the pool, this makes sure that anything that needs - to be patched is completed as early as possible. - (e.g. 
eventlet/gevent monkey patches).""" - try: - pool = _find_option_with_arg(argv, short_opts, long_opts) - except KeyError: - pass - else: - try: - patcher = patches[pool] - except KeyError: - pass - else: - patcher() - # set up eventlet/gevent environments ASAP. - from celery import concurrency - concurrency.get_implementation(pool) - -# Lazy loading -from celery import five # noqa - -old_module, new_module = five.recreate_module( # pragma: no cover - __name__, - by_module={ - 'celery.app': ['Celery', 'bugreport', 'shared_task'], - 'celery.app.task': ['Task'], - 'celery._state': ['current_app', 'current_task'], - 'celery.canvas': ['chain', 'chord', 'chunks', 'group', - 'signature', 'maybe_signature', 'subtask', - 'xmap', 'xstarmap'], - 'celery.utils': ['uuid'], - }, - direct={'task': 'celery.task'}, - __package__='celery', __file__=__file__, - __path__=__path__, __doc__=__doc__, __version__=__version__, - __author__=__author__, __contact__=__contact__, - __homepage__=__homepage__, __docformat__=__docformat__, five=five, - VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER, - version_info_t=version_info_t, - maybe_patch_concurrency=maybe_patch_concurrency, - _find_option_with_arg=_find_option_with_arg, -) diff --git a/thesisenv/lib/python3.6/site-packages/celery/__main__.py b/thesisenv/lib/python3.6/site-packages/celery/__main__.py deleted file mode 100644 index 04448e2..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/__main__.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import - -import sys - -from os.path import basename - -from . import maybe_patch_concurrency - -__all__ = ['main'] - -DEPRECATED_FMT = """ -The {old!r} command is deprecated, please use {new!r} instead: - -$ {new_argv} - -""" - - -def _warn_deprecated(new): - print(DEPRECATED_FMT.format( - old=basename(sys.argv[0]), new=new, - new_argv=' '.join([new] + sys.argv[1:])), - ) - - -def main(): - if 'multi' not in sys.argv: - maybe_patch_concurrency() - from celery.bin.celery import main - main() - - -def _compat_worker(): - maybe_patch_concurrency() - _warn_deprecated('celery worker') - from celery.bin.worker import main - main() - - -def _compat_multi(): - _warn_deprecated('celery multi') - from celery.bin.multi import main - main() - - -def _compat_beat(): - maybe_patch_concurrency() - _warn_deprecated('celery beat') - from celery.bin.beat import main - main() - - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/_state.py b/thesisenv/lib/python3.6/site-packages/celery/_state.py deleted file mode 100644 index 755bb92..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/_state.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery._state - ~~~~~~~~~~~~~~~ - - This is an internal module containing thread state - like the ``current_app``, and ``current_task``. - - This module shouldn't be used directly. 
- -""" -from __future__ import absolute_import, print_function - -import os -import sys -import threading -import weakref - -from celery.local import Proxy -from celery.utils.threads import LocalStack - -try: - from weakref import WeakSet as AppSet -except ImportError: # XXX Py2.6 - - class AppSet(object): # noqa - - def __init__(self): - self._refs = set() - - def add(self, app): - self._refs.add(weakref.ref(app)) - - def __iter__(self): - dirty = [] - try: - for appref in self._refs: - app = appref() - if app is None: - dirty.append(appref) - else: - yield app - finally: - while dirty: - self._refs.discard(dirty.pop()) - -__all__ = ['set_default_app', 'get_current_app', 'get_current_task', - 'get_current_worker_task', 'current_app', 'current_task', - 'connect_on_app_finalize'] - -#: Global default app used when no current app. -default_app = None - -#: List of all app instances (weakrefs), must not be used directly. -_apps = AppSet() - -#: global set of functions to call whenever a new app is finalized -#: E.g. Shared tasks, and builtin tasks are created -#: by adding callbacks here. -_on_app_finalizers = set() - -_task_join_will_block = False - - -def connect_on_app_finalize(callback): - _on_app_finalizers.add(callback) - return callback - - -def _announce_app_finalized(app): - callbacks = set(_on_app_finalizers) - for callback in callbacks: - callback(app) - - -def _set_task_join_will_block(blocks): - global _task_join_will_block - _task_join_will_block = blocks - - -def task_join_will_block(): - return _task_join_will_block - - -class _TLS(threading.local): - #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute - #: sets this, so it will always contain the last instantiated app, - #: and is the default app returned by :func:`app_or_default`. - current_app = None -_tls = _TLS() - -_task_stack = LocalStack() - - -def set_default_app(app): - global default_app - default_app = app - - -def _get_current_app(): - if default_app is None: - #: creates the global fallback app instance. - from celery.app import Celery - set_default_app(Celery( - 'default', - loader=os.environ.get('CELERY_LOADER') or 'default', - fixups=[], - set_as_current=False, accept_magic_kwargs=True, - )) - return _tls.current_app or default_app - - -def _set_current_app(app): - _tls.current_app = app - - -C_STRICT_APP = os.environ.get('C_STRICT_APP') -if os.environ.get('C_STRICT_APP'): # pragma: no cover - def get_current_app(): - raise Exception('USES CURRENT APP') - import traceback - print('-- USES CURRENT_APP', file=sys.stderr) # noqa+ - traceback.print_stack(file=sys.stderr) - return _get_current_app() -else: - get_current_app = _get_current_app - - -def get_current_task(): - """Currently executing task.""" - return _task_stack.top - - -def get_current_worker_task(): - """Currently executing task, that was applied by the worker. - - This is used to differentiate between the actual task - executed by the worker and any task that was called within - a task (using ``task.__call__`` or ``task.apply``) - - """ - for task in reversed(_task_stack.stack): - if not task.request.called_directly: - return task - - -#: Proxy to current app. -current_app = Proxy(get_current_app) - -#: Proxy to current task. 
-current_task = Proxy(get_current_task) - - -def _register_app(app): - _apps.add(app) - - -def _get_active_apps(): - return _apps diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py deleted file mode 100644 index 952a874..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app - ~~~~~~~~~~ - - Celery Application. - -""" -from __future__ import absolute_import - -import os - -from celery.local import Proxy -from celery import _state -from celery._state import ( - get_current_app as current_app, - get_current_task as current_task, - connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack, -) -from celery.utils import gen_task_name - -from .base import Celery, AppPickler - -__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default', - 'bugreport', 'enable_trace', 'disable_trace', 'shared_task', - 'set_default_app', 'current_app', 'current_task', - 'push_current_task', 'pop_current_task'] - -#: Proxy always returning the app set as default. -default_app = Proxy(lambda: _state.default_app) - -#: Function returning the app provided or the default app if none. -#: -#: The environment variable :envvar:`CELERY_TRACE_APP` is used to -#: trace app leaks. When enabled an exception is raised if there -#: is no active app. -app_or_default = None - -#: The 'default' loader is the default loader used by old applications. -#: This is deprecated and should no longer be used as it's set too early -#: to be affected by --loader argument. -default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX - - -#: Function used to push a task to the thread local stack -#: keeping track of the currently executing task. -#: You must remember to pop the task after. -push_current_task = _task_stack.push - -#: Function used to pop a task from the thread local stack -#: keeping track of the currently executing task. -pop_current_task = _task_stack.pop - - -def bugreport(app=None): - return (app or current_app()).bugreport() - - -def _app_or_default(app=None): - if app is None: - return _state.get_current_app() - return app - - -def _app_or_default_trace(app=None): # pragma: no cover - from traceback import print_stack - from billiard import current_process - if app is None: - if getattr(_state._tls, 'current_app', None): - print('-- RETURNING TO CURRENT APP --') # noqa+ - print_stack() - return _state._tls.current_app - if current_process()._name == 'MainProcess': - raise Exception('DEFAULT APP') - print('-- RETURNING TO DEFAULT APP --') # noqa+ - print_stack() - return _state.default_app - return app - - -def enable_trace(): - global app_or_default - app_or_default = _app_or_default_trace - - -def disable_trace(): - global app_or_default - app_or_default = _app_or_default - -if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover - enable_trace() -else: - disable_trace() - -App = Celery # XXX Compat - - -def shared_task(*args, **kwargs): - """Create shared tasks (decorator). - Will return a proxy that always takes the task from the current apps - task registry. - - This can be used by library authors to create tasks that will work - for any app environment. - - Example: - - >>> from celery import Celery, shared_task - >>> @shared_task - ... def add(x, y): - ... 
return x + y - - >>> app1 = Celery(broker='amqp://') - >>> add.app is app1 - True - - >>> app2 = Celery(broker='redis://') - >>> add.app is app2 - - """ - - def create_shared_task(**options): - - def __inner(fun): - name = options.get('name') - # Set as shared task so that unfinalized apps, - # and future apps will load the task. - connect_on_app_finalize( - lambda app: app._task_from_fun(fun, **options) - ) - - # Force all finalized apps to take this task as well. - for app in _get_active_apps(): - if app.finalized: - with app._finalize_mutex: - app._task_from_fun(fun, **options) - - # Return a proxy that always gets the task from the current - # apps task registry. - def task_by_cons(): - app = current_app() - return app.tasks[ - name or gen_task_name(app, fun.__name__, fun.__module__) - ] - return Proxy(task_by_cons) - return __inner - - if len(args) == 1 and callable(args[0]): - return create_shared_task(**kwargs)(args[0]) - return create_shared_task(*args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py deleted file mode 100644 index 27838c2..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py +++ /dev/null @@ -1,512 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.amqp - ~~~~~~~~~~~~~~~ - - Sending and receiving messages using Kombu. - -""" -from __future__ import absolute_import - -import numbers - -from datetime import timedelta -from weakref import WeakValueDictionary - -from kombu import Connection, Consumer, Exchange, Producer, Queue -from kombu.common import Broadcast -from kombu.pools import ProducerPool -from kombu.utils import cached_property, uuid -from kombu.utils.encoding import safe_repr -from kombu.utils.functional import maybe_list - -from celery import signals -from celery.five import items, string_t -from celery.utils.text import indent as textindent -from celery.utils.timeutils import to_utc - -from . import app_or_default -from . import routes as _routes - -__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer'] - -#: earliest date supported by time.mktime. -INT_MIN = -2147483648 - -#: Human readable queue declaration. -QUEUE_FORMAT = """ -.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \ -key={0.routing_key} -""" - - -class Queues(dict): - """Queue name⇒ declaration mapping. - - :param queues: Initial list/tuple or dict of queues. - :keyword create_missing: By default any unknown queues will be - added automatically, but if disabled - the occurrence of unknown queues - in `wanted` will raise :exc:`KeyError`. - :keyword ha_policy: Default HA policy for queues with none set. - - - """ - #: If set, this is a subset of queues to consume from. - #: The rest of the queues are then used for routing only. 
- _consume_from = None - - def __init__(self, queues=None, default_exchange=None, - create_missing=True, ha_policy=None, autoexchange=None): - dict.__init__(self) - self.aliases = WeakValueDictionary() - self.default_exchange = default_exchange - self.create_missing = create_missing - self.ha_policy = ha_policy - self.autoexchange = Exchange if autoexchange is None else autoexchange - if isinstance(queues, (tuple, list)): - queues = dict((q.name, q) for q in queues) - for name, q in items(queues or {}): - self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q) - - def __getitem__(self, name): - try: - return self.aliases[name] - except KeyError: - return dict.__getitem__(self, name) - - def __setitem__(self, name, queue): - if self.default_exchange and (not queue.exchange or - not queue.exchange.name): - queue.exchange = self.default_exchange - dict.__setitem__(self, name, queue) - if queue.alias: - self.aliases[queue.alias] = queue - - def __missing__(self, name): - if self.create_missing: - return self.add(self.new_missing(name)) - raise KeyError(name) - - def add(self, queue, **kwargs): - """Add new queue. - - The first argument can either be a :class:`kombu.Queue` instance, - or the name of a queue. If the former the rest of the keyword - arguments are ignored, and options are simply taken from the queue - instance. - - :param queue: :class:`kombu.Queue` instance or name of the queue. - :keyword exchange: (if named) specifies exchange name. - :keyword routing_key: (if named) specifies binding key. - :keyword exchange_type: (if named) specifies type of exchange. - :keyword \*\*options: (if named) Additional declaration options. - - """ - if not isinstance(queue, Queue): - return self.add_compat(queue, **kwargs) - if self.ha_policy: - if queue.queue_arguments is None: - queue.queue_arguments = {} - self._set_ha_policy(queue.queue_arguments) - self[queue.name] = queue - return queue - - def add_compat(self, name, **options): - # docs used to use binding_key as routing key - options.setdefault('routing_key', options.get('binding_key')) - if options['routing_key'] is None: - options['routing_key'] = name - if self.ha_policy is not None: - self._set_ha_policy(options.setdefault('queue_arguments', {})) - q = self[name] = Queue.from_dict(name, **options) - return q - - def _set_ha_policy(self, args): - policy = self.ha_policy - if isinstance(policy, (list, tuple)): - return args.update({'x-ha-policy': 'nodes', - 'x-ha-policy-params': list(policy)}) - args['x-ha-policy'] = policy - - def format(self, indent=0, indent_first=True): - """Format routing table into string for log dumps.""" - active = self.consume_from - if not active: - return '' - info = [QUEUE_FORMAT.strip().format(q) - for _, q in sorted(items(active))] - if indent_first: - return textindent('\n'.join(info), indent) - return info[0] + '\n' + textindent('\n'.join(info[1:]), indent) - - def select_add(self, queue, **kwargs): - """Add new task queue that will be consumed from even when - a subset has been selected using the :option:`-Q` option.""" - q = self.add(queue, **kwargs) - if self._consume_from is not None: - self._consume_from[q.name] = q - return q - - def select(self, include): - """Sets :attr:`consume_from` by selecting a subset of the - currently defined queues. - - :param include: Names of queues to consume from. - Can be iterable or string. 
- """ - if include: - self._consume_from = dict((name, self[name]) - for name in maybe_list(include)) - select_subset = select # XXX compat - - def deselect(self, exclude): - """Deselect queues so that they will not be consumed from. - - :param exclude: Names of queues to avoid consuming from. - Can be iterable or string. - - """ - if exclude: - exclude = maybe_list(exclude) - if self._consume_from is None: - # using selection - return self.select(k for k in self if k not in exclude) - # using all queues - for queue in exclude: - self._consume_from.pop(queue, None) - select_remove = deselect # XXX compat - - def new_missing(self, name): - return Queue(name, self.autoexchange(name), name) - - @property - def consume_from(self): - if self._consume_from is not None: - return self._consume_from - return self - - -class TaskProducer(Producer): - app = None - auto_declare = False - retry = False - retry_policy = None - utc = True - event_dispatcher = None - send_sent_event = False - - def __init__(self, channel=None, exchange=None, *args, **kwargs): - self.retry = kwargs.pop('retry', self.retry) - self.retry_policy = kwargs.pop('retry_policy', - self.retry_policy or {}) - self.send_sent_event = kwargs.pop('send_sent_event', - self.send_sent_event) - exchange = exchange or self.exchange - self.queues = self.app.amqp.queues # shortcut - self.default_queue = self.app.amqp.default_queue - self._default_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE - super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs) - - def publish_task(self, task_name, task_args=None, task_kwargs=None, - countdown=None, eta=None, task_id=None, group_id=None, - taskset_id=None, # compat alias to group_id - expires=None, exchange=None, exchange_type=None, - event_dispatcher=None, retry=None, retry_policy=None, - queue=None, now=None, retries=0, chord=None, - callbacks=None, errbacks=None, routing_key=None, - serializer=None, delivery_mode=None, compression=None, - reply_to=None, time_limit=None, soft_time_limit=None, - declare=None, headers=None, - send_before_publish=signals.before_task_publish.send, - before_receivers=signals.before_task_publish.receivers, - send_after_publish=signals.after_task_publish.send, - after_receivers=signals.after_task_publish.receivers, - send_task_sent=signals.task_sent.send, # XXX deprecated - sent_receivers=signals.task_sent.receivers, - **kwargs): - """Send task message.""" - retry = self.retry if retry is None else retry - headers = {} if headers is None else headers - - qname = queue - if queue is None and exchange is None: - queue = self.default_queue - if queue is not None: - if isinstance(queue, string_t): - qname, queue = queue, self.queues[queue] - else: - qname = queue.name - exchange = exchange or queue.exchange.name - routing_key = routing_key or queue.routing_key - if declare is None and queue and not isinstance(queue, Broadcast): - declare = [queue] - if delivery_mode is None: - delivery_mode = self._default_mode - - # merge default and custom policy - retry = self.retry if retry is None else retry - _rp = (dict(self.retry_policy, **retry_policy) if retry_policy - else self.retry_policy) - task_id = task_id or uuid() - task_args = task_args or [] - task_kwargs = task_kwargs or {} - if not isinstance(task_args, (list, tuple)): - raise ValueError('task args must be a list or tuple') - if not isinstance(task_kwargs, dict): - raise ValueError('task kwargs must be a dictionary') - if countdown: # Convert countdown to ETA. 
- self._verify_seconds(countdown, 'countdown') - now = now or self.app.now() - eta = now + timedelta(seconds=countdown) - if self.utc: - eta = to_utc(eta).astimezone(self.app.timezone) - if isinstance(expires, numbers.Real): - self._verify_seconds(expires, 'expires') - now = now or self.app.now() - expires = now + timedelta(seconds=expires) - if self.utc: - expires = to_utc(expires).astimezone(self.app.timezone) - eta = eta and eta.isoformat() - expires = expires and expires.isoformat() - - body = { - 'task': task_name, - 'id': task_id, - 'args': task_args, - 'kwargs': task_kwargs, - 'retries': retries or 0, - 'eta': eta, - 'expires': expires, - 'utc': self.utc, - 'callbacks': callbacks, - 'errbacks': errbacks, - 'timelimit': (time_limit, soft_time_limit), - 'taskset': group_id or taskset_id, - 'chord': chord, - } - - if before_receivers: - send_before_publish( - sender=task_name, body=body, - exchange=exchange, - routing_key=routing_key, - declare=declare, - headers=headers, - properties=kwargs, - retry_policy=retry_policy, - ) - - self.publish( - body, - exchange=exchange, routing_key=routing_key, - serializer=serializer or self.serializer, - compression=compression or self.compression, - headers=headers, - retry=retry, retry_policy=_rp, - reply_to=reply_to, - correlation_id=task_id, - delivery_mode=delivery_mode, declare=declare, - **kwargs - ) - - if after_receivers: - send_after_publish(sender=task_name, body=body, - exchange=exchange, routing_key=routing_key) - - if sent_receivers: # XXX deprecated - send_task_sent(sender=task_name, task_id=task_id, - task=task_name, args=task_args, - kwargs=task_kwargs, eta=eta, - taskset=group_id or taskset_id) - if self.send_sent_event: - evd = event_dispatcher or self.event_dispatcher - exname = exchange or self.exchange - if isinstance(exname, Exchange): - exname = exname.name - evd.publish( - 'task-sent', - { - 'uuid': task_id, - 'name': task_name, - 'args': safe_repr(task_args), - 'kwargs': safe_repr(task_kwargs), - 'retries': retries, - 'eta': eta, - 'expires': expires, - 'queue': qname, - 'exchange': exname, - 'routing_key': routing_key, - }, - self, retry=retry, retry_policy=retry_policy, - ) - return task_id - delay_task = publish_task # XXX Compat - - def _verify_seconds(self, s, what): - if s < INT_MIN: - raise ValueError('%s is out of range: %r' % (what, s)) - return s - - @cached_property - def event_dispatcher(self): - # We call Dispatcher.publish with a custom producer - # so don't need the dispatcher to be "enabled". 
- return self.app.events.Dispatcher(enabled=False) - - -class TaskPublisher(TaskProducer): - """Deprecated version of :class:`TaskProducer`.""" - - def __init__(self, channel=None, exchange=None, *args, **kwargs): - self.app = app_or_default(kwargs.pop('app', self.app)) - self.retry = kwargs.pop('retry', self.retry) - self.retry_policy = kwargs.pop('retry_policy', - self.retry_policy or {}) - exchange = exchange or self.exchange - if not isinstance(exchange, Exchange): - exchange = Exchange(exchange, - kwargs.pop('exchange_type', 'direct')) - self.queues = self.app.amqp.queues # shortcut - super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs) - - -class TaskConsumer(Consumer): - app = None - - def __init__(self, channel, queues=None, app=None, accept=None, **kw): - self.app = app or self.app - if accept is None: - accept = self.app.conf.CELERY_ACCEPT_CONTENT - super(TaskConsumer, self).__init__( - channel, - queues or list(self.app.amqp.queues.consume_from.values()), - accept=accept, - **kw - ) - - -class AMQP(object): - Connection = Connection - Consumer = Consumer - - #: compat alias to Connection - BrokerConnection = Connection - - producer_cls = TaskProducer - consumer_cls = TaskConsumer - queues_cls = Queues - - #: Cached and prepared routing table. - _rtable = None - - #: Underlying producer pool instance automatically - #: set by the :attr:`producer_pool`. - _producer_pool = None - - # Exchange class/function used when defining automatic queues. - # E.g. you can use ``autoexchange = lambda n: None`` to use the - # amqp default exchange, which is a shortcut to bypass routing - # and instead send directly to the queue named in the routing key. - autoexchange = None - - def __init__(self, app): - self.app = app - - def flush_routes(self): - self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES) - - def Queues(self, queues, create_missing=None, ha_policy=None, - autoexchange=None): - """Create new :class:`Queues` instance, using queue defaults - from the current configuration.""" - conf = self.app.conf - if create_missing is None: - create_missing = conf.CELERY_CREATE_MISSING_QUEUES - if ha_policy is None: - ha_policy = conf.CELERY_QUEUE_HA_POLICY - if not queues and conf.CELERY_DEFAULT_QUEUE: - queues = (Queue(conf.CELERY_DEFAULT_QUEUE, - exchange=self.default_exchange, - routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), ) - autoexchange = (self.autoexchange if autoexchange is None - else autoexchange) - return self.queues_cls( - queues, self.default_exchange, create_missing, - ha_policy, autoexchange, - ) - - def Router(self, queues=None, create_missing=None): - """Return the current task router.""" - return _routes.Router(self.routes, queues or self.queues, - self.app.either('CELERY_CREATE_MISSING_QUEUES', - create_missing), app=self.app) - - @cached_property - def TaskConsumer(self): - """Return consumer configured to consume from the queues - we are configured for (``app.amqp.queues.consume_from``).""" - return self.app.subclass_with_self(self.consumer_cls, - reverse='amqp.TaskConsumer') - get_task_consumer = TaskConsumer # XXX compat - - @cached_property - def TaskProducer(self): - """Return publisher used to send tasks. - - You should use `app.send_task` instead. 
- - """ - conf = self.app.conf - return self.app.subclass_with_self( - self.producer_cls, - reverse='amqp.TaskProducer', - exchange=self.default_exchange, - routing_key=conf.CELERY_DEFAULT_ROUTING_KEY, - serializer=conf.CELERY_TASK_SERIALIZER, - compression=conf.CELERY_MESSAGE_COMPRESSION, - retry=conf.CELERY_TASK_PUBLISH_RETRY, - retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY, - send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT, - utc=conf.CELERY_ENABLE_UTC, - ) - TaskPublisher = TaskProducer # compat - - @cached_property - def default_queue(self): - return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE] - - @cached_property - def queues(self): - """Queue name⇒ declaration mapping.""" - return self.Queues(self.app.conf.CELERY_QUEUES) - - @queues.setter # noqa - def queues(self, queues): - return self.Queues(queues) - - @property - def routes(self): - if self._rtable is None: - self.flush_routes() - return self._rtable - - @cached_property - def router(self): - return self.Router() - - @property - def producer_pool(self): - if self._producer_pool is None: - self._producer_pool = ProducerPool( - self.app.pool, - limit=self.app.pool.limit, - Producer=self.TaskProducer, - ) - return self._producer_pool - publisher_pool = producer_pool # compat alias - - @cached_property - def default_exchange(self): - return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE, - self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py b/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py deleted file mode 100644 index 27f436b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.annotations - ~~~~~~~~~~~~~~~~~~~~~~ - - Annotations is a nice term for monkey patching - task classes in the configuration. - - This prepares and performs the annotations in the - :setting:`CELERY_ANNOTATIONS` setting. - -""" -from __future__ import absolute_import - -from celery.five import string_t -from celery.utils.functional import firstmethod, mlazy -from celery.utils.imports import instantiate - -_first_match = firstmethod('annotate') -_first_match_any = firstmethod('annotate_any') - -__all__ = ['MapAnnotation', 'prepare', 'resolve_all'] - - -class MapAnnotation(dict): - - def annotate_any(self): - try: - return dict(self['*']) - except KeyError: - pass - - def annotate(self, task): - try: - return dict(self[task.name]) - except KeyError: - pass - - -def prepare(annotations): - """Expands the :setting:`CELERY_ANNOTATIONS` setting.""" - - def expand_annotation(annotation): - if isinstance(annotation, dict): - return MapAnnotation(annotation) - elif isinstance(annotation, string_t): - return mlazy(instantiate, annotation) - return annotation - - if annotations is None: - return () - elif not isinstance(annotations, (list, tuple)): - annotations = (annotations, ) - return [expand_annotation(anno) for anno in annotations] - - -def resolve_all(anno, task): - return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/base.py b/thesisenv/lib/python3.6/site-packages/celery/app/base.py deleted file mode 100644 index 8f33c1b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/base.py +++ /dev/null @@ -1,675 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.base - ~~~~~~~~~~~~~~~ - - Actual App instance implementation. 
- -""" -from __future__ import absolute_import - -import os -import threading -import warnings - -from collections import defaultdict, deque -from copy import deepcopy -from operator import attrgetter - -from amqp import promise -from billiard.util import register_after_fork -from kombu.clocks import LamportClock -from kombu.common import oid_from -from kombu.utils import cached_property, uuid - -from celery import platforms -from celery import signals -from celery._state import ( - _task_stack, get_current_app, _set_current_app, set_default_app, - _register_app, get_current_worker_task, connect_on_app_finalize, - _announce_app_finalized, -) -from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured -from celery.five import values -from celery.loaders import get_loader_cls -from celery.local import PromiseProxy, maybe_evaluate -from celery.utils.functional import first, maybe_list -from celery.utils.imports import instantiate, symbol_by_name -from celery.utils.objects import FallbackContext, mro_lookup - -from .annotations import prepare as prepare_annotations -from .defaults import DEFAULTS, find_deprecated_settings -from .registry import TaskRegistry -from .utils import ( - AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr, -) - -# Load all builtin tasks -from . import builtins # noqa - -__all__ = ['Celery'] - -_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING') -BUILTIN_FIXUPS = frozenset([ - 'celery.fixups.django:fixup', -]) - -ERR_ENVVAR_NOT_SET = """\ -The environment variable {0!r} is not set, -and as such the configuration could not be loaded. -Please set this variable and make it point to -a configuration module.""" - -_after_fork_registered = False - - -def app_has_custom(app, attr): - return mro_lookup(app.__class__, attr, stop=(Celery, object), - monkey_patched=[__name__]) - - -def _unpickle_appattr(reverse_name, args): - """Given an attribute name and a list of args, gets - the attribute from the current app and calls it.""" - return get_current_app()._rgetattr(reverse_name)(*args) - - -def _global_after_fork(obj): - # Previously every app would call: - # `register_after_fork(app, app._after_fork)` - # but this created a leak as `register_after_fork` stores concrete object - # references and once registered an object cannot be removed without - # touching and iterating over the private afterfork registry list. 
- # - # See Issue #1949 - from celery import _state - from multiprocessing import util as mputil - for app in _state._apps: - try: - app._after_fork(obj) - except Exception as exc: - if mputil._logger: - mputil._logger.info( - 'after forker raised exception: %r', exc, exc_info=1) - - -def _ensure_after_fork(): - global _after_fork_registered - _after_fork_registered = True - register_after_fork(_global_after_fork, _global_after_fork) - - -class Celery(object): - #: This is deprecated, use :meth:`reduce_keys` instead - Pickler = AppPickler - - SYSTEM = platforms.SYSTEM - IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS - - amqp_cls = 'celery.app.amqp:AMQP' - backend_cls = None - events_cls = 'celery.events:Events' - loader_cls = 'celery.loaders.app:AppLoader' - log_cls = 'celery.app.log:Logging' - control_cls = 'celery.app.control:Control' - task_cls = 'celery.app.task:Task' - registry_cls = TaskRegistry - _fixups = None - _pool = None - builtin_fixups = BUILTIN_FIXUPS - - def __init__(self, main=None, loader=None, backend=None, - amqp=None, events=None, log=None, control=None, - set_as_current=True, accept_magic_kwargs=False, - tasks=None, broker=None, include=None, changes=None, - config_source=None, fixups=None, task_cls=None, - autofinalize=True, **kwargs): - self.clock = LamportClock() - self.main = main - self.amqp_cls = amqp or self.amqp_cls - self.events_cls = events or self.events_cls - self.loader_cls = loader or self.loader_cls - self.log_cls = log or self.log_cls - self.control_cls = control or self.control_cls - self.task_cls = task_cls or self.task_cls - self.set_as_current = set_as_current - self.registry_cls = symbol_by_name(self.registry_cls) - self.accept_magic_kwargs = accept_magic_kwargs - self.user_options = defaultdict(set) - self.steps = defaultdict(set) - self.autofinalize = autofinalize - - self.configured = False - self._config_source = config_source - self._pending_defaults = deque() - - self.finalized = False - self._finalize_mutex = threading.Lock() - self._pending = deque() - self._tasks = tasks - if not isinstance(self._tasks, TaskRegistry): - self._tasks = TaskRegistry(self._tasks or {}) - - # If the class defines a custom __reduce_args__ we need to use - # the old way of pickling apps, which is pickling a list of - # args instead of the new way that pickles a dict of keywords. - self._using_v1_reduce = app_has_custom(self, '__reduce_args__') - - # these options are moved to the config to - # simplify pickling of the app object. - self._preconf = changes or {} - if broker: - self._preconf['BROKER_URL'] = broker - if backend: - self._preconf['CELERY_RESULT_BACKEND'] = backend - if include: - self._preconf['CELERY_IMPORTS'] = include - - # - Apply fixups. - self.fixups = set(self.builtin_fixups) if fixups is None else fixups - # ...store fixup instances in _fixups to keep weakrefs alive. 
- self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups] - - if self.set_as_current: - self.set_current() - - self.on_init() - _register_app(self) - - def set_current(self): - _set_current_app(self) - - def set_default(self): - set_default_app(self) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - - def close(self): - self._maybe_close_pool() - - def on_init(self): - """Optional callback called at init.""" - pass - - def start(self, argv=None): - return instantiate( - 'celery.bin.celery:CeleryCommand', - app=self).execute_from_commandline(argv) - - def worker_main(self, argv=None): - return instantiate( - 'celery.bin.worker:worker', - app=self).execute_from_commandline(argv) - - def task(self, *args, **opts): - """Creates new task class from any callable.""" - if _EXECV and not opts.get('_force_evaluate'): - # When using execv the task in the original module will point to a - # different app, so doing things like 'add.request' will point to - # a different task instance. This makes sure it will always use - # the task instance from the current app. - # Really need a better solution for this :( - from . import shared_task - return shared_task(*args, _force_evaluate=True, **opts) - - def inner_create_task_cls(shared=True, filter=None, **opts): - _filt = filter # stupid 2to3 - - def _create_task_cls(fun): - if shared: - def cons(app): - return app._task_from_fun(fun, **opts) - cons.__name__ = fun.__name__ - connect_on_app_finalize(cons) - if self.accept_magic_kwargs: # compat mode - task = self._task_from_fun(fun, **opts) - if filter: - task = filter(task) - return task - - if self.finalized or opts.get('_force_evaluate'): - ret = self._task_from_fun(fun, **opts) - else: - # return a proxy object that evaluates on first use - ret = PromiseProxy(self._task_from_fun, (fun, ), opts, - __doc__=fun.__doc__) - self._pending.append(ret) - if _filt: - return _filt(ret) - return ret - - return _create_task_cls - - if len(args) == 1: - if callable(args[0]): - return inner_create_task_cls(**opts)(*args) - raise TypeError('argument 1 to @task() must be a callable') - if args: - raise TypeError( - '@task() takes exactly 1 argument ({0} given)'.format( - sum([len(args), len(opts)]))) - return inner_create_task_cls(**opts) - - def _task_from_fun(self, fun, **options): - if not self.finalized and not self.autofinalize: - raise RuntimeError('Contract breach: app not finalized') - base = options.pop('base', None) or self.Task - bind = options.pop('bind', False) - - T = type(fun.__name__, (base, ), dict({ - 'app': self, - 'accept_magic_kwargs': False, - 'run': fun if bind else staticmethod(fun), - '_decorated': True, - '__doc__': fun.__doc__, - '__module__': fun.__module__, - '__wrapped__': fun}, **options))() - task = self._tasks[T.name] # return global instance.
- return task - - def finalize(self, auto=False): - with self._finalize_mutex: - if not self.finalized: - if auto and not self.autofinalize: - raise RuntimeError('Contract breach: app not finalized') - self.finalized = True - _announce_app_finalized(self) - - pending = self._pending - while pending: - maybe_evaluate(pending.popleft()) - - for task in values(self._tasks): - task.bind(self) - - def add_defaults(self, fun): - if not callable(fun): - d, fun = fun, lambda: d - if self.configured: - return self.conf.add_defaults(fun()) - self._pending_defaults.append(fun) - - def config_from_object(self, obj, silent=False, force=False): - self._config_source = obj - if force or self.configured: - del(self.conf) - return self.loader.config_from_object(obj, silent=silent) - - def config_from_envvar(self, variable_name, silent=False, force=False): - module_name = os.environ.get(variable_name) - if not module_name: - if silent: - return False - raise ImproperlyConfigured( - ERR_ENVVAR_NOT_SET.format(variable_name)) - return self.config_from_object(module_name, silent=silent, force=force) - - def config_from_cmdline(self, argv, namespace='celery'): - self.conf.update(self.loader.cmdline_config_parser(argv, namespace)) - - def setup_security(self, allowed_serializers=None, key=None, cert=None, - store=None, digest='sha1', serializer='json'): - from celery.security import setup_security - return setup_security(allowed_serializers, key, cert, - store, digest, serializer, app=self) - - def autodiscover_tasks(self, packages, related_name='tasks', force=False): - if force: - return self._autodiscover_tasks(packages, related_name) - signals.import_modules.connect(promise( - self._autodiscover_tasks, (packages, related_name), - ), weak=False, sender=self) - - def _autodiscover_tasks(self, packages, related_name='tasks', **kwargs): - # argument may be lazy - packages = packages() if callable(packages) else packages - self.loader.autodiscover_tasks(packages, related_name) - - def send_task(self, name, args=None, kwargs=None, countdown=None, - eta=None, task_id=None, producer=None, connection=None, - router=None, result_cls=None, expires=None, - publisher=None, link=None, link_error=None, - add_to_parent=True, reply_to=None, **options): - task_id = task_id or uuid() - producer = producer or publisher # XXX compat - router = router or self.amqp.router - conf = self.conf - if conf.CELERY_ALWAYS_EAGER: # pragma: no cover - warnings.warn(AlwaysEagerIgnored( - 'CELERY_ALWAYS_EAGER has no effect on send_task', - ), stacklevel=2) - options = router.route(options, name, args, kwargs) - if connection: - producer = self.amqp.TaskProducer(connection) - with self.producer_or_acquire(producer) as P: - self.backend.on_task_call(P, task_id) - task_id = P.publish_task( - name, args, kwargs, countdown=countdown, eta=eta, - task_id=task_id, expires=expires, - callbacks=maybe_list(link), errbacks=maybe_list(link_error), - reply_to=reply_to or self.oid, **options - ) - result = (result_cls or self.AsyncResult)(task_id) - if add_to_parent: - parent = get_current_worker_task() - if parent: - parent.add_trail(result) - return result - - def connection(self, hostname=None, userid=None, password=None, - virtual_host=None, port=None, ssl=None, - connect_timeout=None, transport=None, - transport_options=None, heartbeat=None, - login_method=None, failover_strategy=None, **kwargs): - conf = self.conf - return self.amqp.Connection( - hostname or conf.BROKER_URL, - userid or conf.BROKER_USER, - password or conf.BROKER_PASSWORD, - 
virtual_host or conf.BROKER_VHOST, - port or conf.BROKER_PORT, - transport=transport or conf.BROKER_TRANSPORT, - ssl=self.either('BROKER_USE_SSL', ssl), - heartbeat=heartbeat, - login_method=login_method or conf.BROKER_LOGIN_METHOD, - failover_strategy=( - failover_strategy or conf.BROKER_FAILOVER_STRATEGY - ), - transport_options=dict( - conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {} - ), - connect_timeout=self.either( - 'BROKER_CONNECTION_TIMEOUT', connect_timeout - ), - ) - broker_connection = connection - - def _acquire_connection(self, pool=True): - """Helper for :meth:`connection_or_acquire`.""" - if pool: - return self.pool.acquire(block=True) - return self.connection() - - def connection_or_acquire(self, connection=None, pool=True, *_, **__): - return FallbackContext(connection, self._acquire_connection, pool=pool) - default_connection = connection_or_acquire # XXX compat - - def producer_or_acquire(self, producer=None): - return FallbackContext( - producer, self.amqp.producer_pool.acquire, block=True, - ) - default_producer = producer_or_acquire # XXX compat - - def prepare_config(self, c): - """Prepare configuration before it is merged with the defaults.""" - return find_deprecated_settings(c) - - def now(self): - return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC) - - def mail_admins(self, subject, body, fail_silently=False): - if self.conf.ADMINS: - to = [admin_email for _, admin_email in self.conf.ADMINS] - return self.loader.mail_admins( - subject, body, fail_silently, to=to, - sender=self.conf.SERVER_EMAIL, - host=self.conf.EMAIL_HOST, - port=self.conf.EMAIL_PORT, - user=self.conf.EMAIL_HOST_USER, - password=self.conf.EMAIL_HOST_PASSWORD, - timeout=self.conf.EMAIL_TIMEOUT, - use_ssl=self.conf.EMAIL_USE_SSL, - use_tls=self.conf.EMAIL_USE_TLS, - ) - - def select_queues(self, queues=None): - return self.amqp.queues.select(queues) - - def either(self, default_key, *values): - """Fallback to the value of a configuration key if none of the - `*values` are true.""" - return first(None, values) or self.conf.get(default_key) - - def bugreport(self): - return bugreport(self) - - def _get_backend(self): - from celery.backends import get_backend_by_url - backend, url = get_backend_by_url( - self.backend_cls or self.conf.CELERY_RESULT_BACKEND, - self.loader) - return backend(app=self, url=url) - - def on_configure(self): - """Callback called when the app loads configuration.""" - pass - - def _get_config(self): - self.on_configure() - if self._config_source: - self.loader.config_from_object(self._config_source) - self.configured = True - s = Settings({}, [self.prepare_config(self.loader.conf), - deepcopy(DEFAULTS)]) - # load lazy config dict initializers. - pending = self._pending_defaults - while pending: - s.add_defaults(maybe_evaluate(pending.popleft()())) - - # preconf options must be explicitly set in the conf, and not - # as defaults or they will not be pickled with the app instance. - # This will cause errors when `CELERYD_FORCE_EXECV=True` as - # the workers will not have a BROKER_URL, CELERY_RESULT_BACKEND, - # or CELERY_IMPORTS set in the config.
- if self._preconf: - s.update(self._preconf) - return s - - def _after_fork(self, obj_): - self._maybe_close_pool() - - def _maybe_close_pool(self): - pool, self._pool = self._pool, None - if pool is not None: - pool.force_close_all() - amqp = self.__dict__.get('amqp') - if amqp is not None: - producer_pool, amqp._producer_pool = amqp._producer_pool, None - if producer_pool is not None: - producer_pool.force_close_all() - - def signature(self, *args, **kwargs): - kwargs['app'] = self - return self.canvas.signature(*args, **kwargs) - - def create_task_cls(self): - """Creates a base task class using default configuration - taken from this app.""" - return self.subclass_with_self( - self.task_cls, name='Task', attribute='_app', - keep_reduce=True, abstract=True, - ) - - def subclass_with_self(self, Class, name=None, attribute='app', - reverse=None, keep_reduce=False, **kw): - """Subclass an app-compatible class by setting its app attribute - to be this app instance. - - App-compatible means that the class has a class attribute that - provides the default app it should use, e.g. - ``class Foo: app = None``. - - :param Class: The app-compatible class to subclass. - :keyword name: Custom name for the target class. - :keyword attribute: Name of the attribute holding the app, - default is 'app'. - - """ - Class = symbol_by_name(Class) - reverse = reverse if reverse else Class.__name__ - - def __reduce__(self): - return _unpickle_appattr, (reverse, self.__reduce_args__()) - - attrs = dict({attribute: self}, __module__=Class.__module__, - __doc__=Class.__doc__, **kw) - if not keep_reduce: - attrs['__reduce__'] = __reduce__ - - return type(name or Class.__name__, (Class, ), attrs) - - def _rgetattr(self, path): - return attrgetter(path)(self) - - def __repr__(self): - return '<{0} {1}>'.format(type(self).__name__, appstr(self)) - - def __reduce__(self): - if self._using_v1_reduce: - return self.__reduce_v1__() - return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__())) - - def __reduce_v1__(self): - # Reduce only pickles the configuration changes, - # so the default configuration doesn't have to be passed - # between processes. 
- return ( - _unpickle_app, - (self.__class__, self.Pickler) + self.__reduce_args__(), - ) - - def __reduce_keys__(self): - """Return keyword arguments used to reconstruct the object - when unpickling.""" - return { - 'main': self.main, - 'changes': self.conf.changes if self.configured else self._preconf, - 'loader': self.loader_cls, - 'backend': self.backend_cls, - 'amqp': self.amqp_cls, - 'events': self.events_cls, - 'log': self.log_cls, - 'control': self.control_cls, - 'accept_magic_kwargs': self.accept_magic_kwargs, - 'fixups': self.fixups, - 'config_source': self._config_source, - 'task_cls': self.task_cls, - } - - def __reduce_args__(self): - """Deprecated method, please use :meth:`__reduce_keys__` instead.""" - return (self.main, self.conf.changes, - self.loader_cls, self.backend_cls, self.amqp_cls, - self.events_cls, self.log_cls, self.control_cls, - self.accept_magic_kwargs, self._config_source) - - @cached_property - def Worker(self): - return self.subclass_with_self('celery.apps.worker:Worker') - - @cached_property - def WorkController(self, **kwargs): - return self.subclass_with_self('celery.worker:WorkController') - - @cached_property - def Beat(self, **kwargs): - return self.subclass_with_self('celery.apps.beat:Beat') - - @cached_property - def Task(self): - return self.create_task_cls() - - @cached_property - def annotations(self): - return prepare_annotations(self.conf.CELERY_ANNOTATIONS) - - @cached_property - def AsyncResult(self): - return self.subclass_with_self('celery.result:AsyncResult') - - @cached_property - def ResultSet(self): - return self.subclass_with_self('celery.result:ResultSet') - - @cached_property - def GroupResult(self): - return self.subclass_with_self('celery.result:GroupResult') - - @cached_property - def TaskSet(self): # XXX compat - """Deprecated! Please use :class:`celery.group` instead.""" - return self.subclass_with_self('celery.task.sets:TaskSet') - - @cached_property - def TaskSetResult(self): # XXX compat - """Deprecated! 
Please use :attr:`GroupResult` instead.""" - return self.subclass_with_self('celery.result:TaskSetResult') - - @property - def pool(self): - if self._pool is None: - _ensure_after_fork() - limit = self.conf.BROKER_POOL_LIMIT - self._pool = self.connection().Pool(limit=limit) - return self._pool - - @property - def current_task(self): - return _task_stack.top - - @cached_property - def oid(self): - return oid_from(self) - - @cached_property - def amqp(self): - return instantiate(self.amqp_cls, app=self) - - @cached_property - def backend(self): - return self._get_backend() - - @cached_property - def conf(self): - return self._get_config() - - @cached_property - def control(self): - return instantiate(self.control_cls, app=self) - - @cached_property - def events(self): - return instantiate(self.events_cls, app=self) - - @cached_property - def loader(self): - return get_loader_cls(self.loader_cls)(app=self) - - @cached_property - def log(self): - return instantiate(self.log_cls, app=self) - - @cached_property - def canvas(self): - from celery import canvas - return canvas - - @cached_property - def tasks(self): - self.finalize(auto=True) - return self._tasks - - @cached_property - def timezone(self): - from celery.utils.timeutils import timezone - conf = self.conf - tz = conf.CELERY_TIMEZONE - if not tz: - return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC - else timezone.local) - return timezone.get_timezone(self.conf.CELERY_TIMEZONE) -App = Celery # compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py b/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py deleted file mode 100644 index 1502768..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.builtins - ~~~~~~~~~~~~~~~~~~~ - - Built-in tasks that are always available in all - app instances. E.g. chord, group and xmap. - -""" -from __future__ import absolute_import - -from collections import deque - -from celery._state import get_current_worker_task, connect_on_app_finalize -from celery.utils import uuid -from celery.utils.log import get_logger - -__all__ = [] - -logger = get_logger(__name__) - - -@connect_on_app_finalize -def add_backend_cleanup_task(app): - """The backend cleanup task can be used to clean up the default result - backend. - - If the configured backend requires periodic cleanup this task is also - automatically configured to run every day at 4am (requires - :program:`celery beat` to be running). - - """ - @app.task(name='celery.backend_cleanup', - shared=False, _force_evaluate=True) - def backend_cleanup(): - app.backend.cleanup() - return backend_cleanup - - -@connect_on_app_finalize -def add_unlock_chord_task(app): - """This task is used by result backends without native chord support. - - It joins chords by creating a task chain polling the header for completion. 
- - """ - from celery.canvas import signature - from celery.exceptions import ChordError - from celery.result import allow_join_result, result_from_tuple - - default_propagate = app.conf.CELERY_CHORD_PROPAGATES - - @app.task(name='celery.chord_unlock', max_retries=None, shared=False, - default_retry_delay=1, ignore_result=True, _force_evaluate=True, - bind=True) - def unlock_chord(self, group_id, callback, interval=None, propagate=None, - max_retries=None, result=None, - Result=app.AsyncResult, GroupResult=app.GroupResult, - result_from_tuple=result_from_tuple): - # if propagate is disabled exceptions raised by chord tasks - # will be sent as part of the result list to the chord callback. - # Since 3.1 propagate will be enabled by default, and instead - # the chord callback changes state to FAILURE with the - # exception set to ChordError. - propagate = default_propagate if propagate is None else propagate - if interval is None: - interval = self.default_retry_delay - - # check if the task group is ready, and if so apply the callback. - deps = GroupResult( - group_id, - [result_from_tuple(r, app=app) for r in result], - app=app, - ) - j = deps.join_native if deps.supports_native_join else deps.join - - try: - ready = deps.ready() - except Exception as exc: - raise self.retry( - exc=exc, countdown=interval, max_retries=max_retries, - ) - else: - if not ready: - raise self.retry(countdown=interval, max_retries=max_retries) - - callback = signature(callback, app=app) - try: - with allow_join_result(): - ret = j(timeout=3.0, propagate=propagate) - except Exception as exc: - try: - culprit = next(deps._failed_join_report()) - reason = 'Dependency {0.id} raised {1!r}'.format( - culprit, exc, - ) - except StopIteration: - reason = repr(exc) - logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) - app.backend.chord_error_from_stack(callback, - ChordError(reason)) - else: - try: - callback.delay(ret) - except Exception as exc: - logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) - app.backend.chord_error_from_stack( - callback, - exc=ChordError('Callback error: {0!r}'.format(exc)), - ) - return unlock_chord - - -@connect_on_app_finalize -def add_map_task(app): - from celery.canvas import signature - - @app.task(name='celery.map', shared=False, _force_evaluate=True) - def xmap(task, it): - task = signature(task, app=app).type - return [task(item) for item in it] - return xmap - - -@connect_on_app_finalize -def add_starmap_task(app): - from celery.canvas import signature - - @app.task(name='celery.starmap', shared=False, _force_evaluate=True) - def xstarmap(task, it): - task = signature(task, app=app).type - return [task(*item) for item in it] - return xstarmap - - -@connect_on_app_finalize -def add_chunk_task(app): - from celery.canvas import chunks as _chunks - - @app.task(name='celery.chunks', shared=False, _force_evaluate=True) - def chunks(task, it, n): - return _chunks.apply_chunks(task, it, n) - return chunks - - -@connect_on_app_finalize -def add_group_task(app): - _app = app - from celery.canvas import maybe_signature, signature - from celery.result import result_from_tuple - - class Group(app.Task): - app = _app - name = 'celery.group' - accept_magic_kwargs = False - _decorated = True - - def run(self, tasks, result, group_id, partial_args, - add_to_parent=True): - app = self.app - result = result_from_tuple(result, app) - # any partial args are added to all tasks in the group - taskit = (signature(task, app=app).clone(partial_args) - for i, task in enumerate(tasks)) - 
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER: - return app.GroupResult( - result.id, - [stask.apply(group_id=group_id) for stask in taskit], - ) - with app.producer_or_acquire() as pub: - [stask.apply_async(group_id=group_id, producer=pub, - add_to_parent=False) for stask in taskit] - parent = get_current_worker_task() - if add_to_parent and parent: - parent.add_trail(result) - return result - - def prepare(self, options, tasks, args, **kwargs): - options['group_id'] = group_id = ( - options.setdefault('task_id', uuid())) - - def prepare_member(task): - task = maybe_signature(task, app=self.app) - task.options['group_id'] = group_id - return task, task.freeze() - - try: - tasks, res = list(zip( - *[prepare_member(task) for task in tasks] - )) - except ValueError: # tasks empty - tasks, res = [], [] - return (tasks, self.app.GroupResult(group_id, res), group_id, args) - - def apply_async(self, partial_args=(), kwargs={}, **options): - if self.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(partial_args, kwargs, **options) - tasks, result, gid, args = self.prepare( - options, args=partial_args, **kwargs - ) - super(Group, self).apply_async(( - list(tasks), result.as_tuple(), gid, args), **options - ) - return result - - def apply(self, args=(), kwargs={}, **options): - return super(Group, self).apply( - self.prepare(options, args=args, **kwargs), - **options).get() - return Group - - -@connect_on_app_finalize -def add_chain_task(app): - from celery.canvas import ( - Signature, chain, chord, group, maybe_signature, maybe_unroll_group, - ) - - _app = app - - class Chain(app.Task): - app = _app - name = 'celery.chain' - accept_magic_kwargs = False - _decorated = True - - def prepare_steps(self, args, tasks): - app = self.app - steps = deque(tasks) - next_step = prev_task = prev_res = None - tasks, results = [], [] - i = 0 - while steps: - # First task gets partial args from chain. - task = maybe_signature(steps.popleft(), app=app) - task = task.clone() if i else task.clone(args) - res = task.freeze() - i += 1 - - if isinstance(task, group): - task = maybe_unroll_group(task) - if isinstance(task, chain): - # splice the chain - steps.extendleft(reversed(task.tasks)) - continue - - elif isinstance(task, group) and steps and \ - not isinstance(steps[0], group): - # automatically upgrade group(..) | s to chord(group, s) - try: - next_step = steps.popleft() - # for chords we freeze by pretending it's a normal - # task instead of a group. - res = Signature.freeze(next_step) - task = chord(task, body=next_step, task_id=res.task_id) - except IndexError: - pass # no callback, so keep as group - if prev_task: - # link previous task to this task. - prev_task.link(task) - # set the result's parent attribute. - if not res.parent: - res.parent = prev_res - - if not isinstance(prev_task, chord): - results.append(res) - tasks.append(task) - prev_task, prev_res = task, res - - return tasks, results - - def apply_async(self, args=(), kwargs={}, group_id=None, chord=None, - task_id=None, link=None, link_error=None, **options): - if self.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, **options) - options.pop('publisher', None) - tasks, results = self.prepare_steps(args, kwargs['tasks']) - result = results[-1] - if group_id: - tasks[-1].set(group_id=group_id) - if chord: - tasks[-1].set(chord=chord) - if task_id: - tasks[-1].set(task_id=task_id) - result = tasks[-1].type.AsyncResult(task_id) - # make sure we can do a link() and link_error() on a chain object.
- if link: - tasks[-1].set(link=link) - # and if any task in the chain fails, call the errbacks - if link_error: - for task in tasks: - task.set(link_error=link_error) - tasks[0].apply_async(**options) - return result - - def apply(self, args=(), kwargs={}, signature=maybe_signature, - **options): - app = self.app - last, fargs = None, args # fargs passed to first task only - for task in kwargs['tasks']: - res = signature(task, app=app).clone(fargs).apply( - last and (last.get(), ), - ) - res.parent, last, fargs = last, res, None - return last - return Chain - - -@connect_on_app_finalize -def add_chord_task(app): - """Every chord is executed in a dedicated task, so that the chord - can be used as a signature, and this generates the task - responsible for that.""" - from celery import group - from celery.canvas import maybe_signature - _app = app - default_propagate = app.conf.CELERY_CHORD_PROPAGATES - - class Chord(app.Task): - app = _app - name = 'celery.chord' - accept_magic_kwargs = False - ignore_result = False - _decorated = True - - def run(self, header, body, partial_args=(), interval=None, - countdown=1, max_retries=None, propagate=None, - eager=False, **kwargs): - app = self.app - propagate = default_propagate if propagate is None else propagate - group_id = uuid() - - # - convert back to group if serialized - tasks = header.tasks if isinstance(header, group) else header - header = group([ - maybe_signature(s, app=app).clone() for s in tasks - ], app=self.app) - # - eager applies the group inline - if eager: - return header.apply(args=partial_args, task_id=group_id) - - body['chord_size'] = len(header.tasks) - results = header.freeze(group_id=group_id, chord=body).results - - return self.backend.apply_chord( - header, partial_args, group_id, - body, interval=interval, countdown=countdown, - max_retries=max_retries, propagate=propagate, result=results, - ) - - def apply_async(self, args=(), kwargs={}, task_id=None, - group_id=None, chord=None, **options): - app = self.app - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, **options) - header = kwargs.pop('header') - body = kwargs.pop('body') - header, body = (maybe_signature(header, app=app), - maybe_signature(body, app=app)) - # forward certain options to body - if chord is not None: - body.options['chord'] = chord - if group_id is not None: - body.options['group_id'] = group_id - [body.link(s) for s in options.pop('link', [])] - [body.link_error(s) for s in options.pop('link_error', [])] - body_result = body.freeze(task_id) - parent = super(Chord, self).apply_async((header, body, args), - kwargs, **options) - body_result.parent = parent - return body_result - - def apply(self, args=(), kwargs={}, propagate=True, **options): - body = kwargs['body'] - res = super(Chord, self).apply(args, dict(kwargs, eager=True), - **options) - return maybe_signature(body, app=self.app).apply( - args=(res.get(propagate=propagate).get(), )) - return Chord diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/control.py b/thesisenv/lib/python3.6/site-packages/celery/app/control.py deleted file mode 100644 index 7258dd6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/control.py +++ /dev/null @@ -1,317 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.control - ~~~~~~~~~~~~~~~~~~~ - - Client for worker remote control commands. - Server implementation is in :mod:`celery.worker.control`. 
- -""" -from __future__ import absolute_import - -import warnings - -from kombu.pidbox import Mailbox -from kombu.utils import cached_property - -from celery.exceptions import DuplicateNodenameWarning -from celery.utils.text import pluralize - -__all__ = ['Inspect', 'Control', 'flatten_reply'] - -W_DUPNODE = """\ -Received multiple replies from node {0}: {1}. -Please make sure you give each node a unique nodename using the `-n` option.\ -""" - - -def flatten_reply(reply): - nodes, dupes = {}, set() - for item in reply: - [dupes.add(name) for name in item if name in nodes] - nodes.update(item) - if dupes: - warnings.warn(DuplicateNodenameWarning( - W_DUPNODE.format( - pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)), - ), - )) - return nodes - - -class Inspect(object): - app = None - - def __init__(self, destination=None, timeout=1, callback=None, - connection=None, app=None, limit=None): - self.app = app or self.app - self.destination = destination - self.timeout = timeout - self.callback = callback - self.connection = connection - self.limit = limit - - def _prepare(self, reply): - if not reply: - return - by_node = flatten_reply(reply) - if self.destination and \ - not isinstance(self.destination, (list, tuple)): - return by_node.get(self.destination) - return by_node - - def _request(self, command, **kwargs): - return self._prepare(self.app.control.broadcast( - command, - arguments=kwargs, - destination=self.destination, - callback=self.callback, - connection=self.connection, - limit=self.limit, - timeout=self.timeout, reply=True, - )) - - def report(self): - return self._request('report') - - def clock(self): - return self._request('clock') - - def active(self, safe=False): - return self._request('dump_active', safe=safe) - - def scheduled(self, safe=False): - return self._request('dump_schedule', safe=safe) - - def reserved(self, safe=False): - return self._request('dump_reserved', safe=safe) - - def stats(self): - return self._request('stats') - - def revoked(self): - return self._request('dump_revoked') - - def registered(self, *taskinfoitems): - return self._request('dump_tasks', taskinfoitems=taskinfoitems) - registered_tasks = registered - - def ping(self): - return self._request('ping') - - def active_queues(self): - return self._request('active_queues') - - def query_task(self, ids): - return self._request('query_task', ids=ids) - - def conf(self, with_defaults=False): - return self._request('dump_conf', with_defaults=with_defaults) - - def hello(self, from_node, revoked=None): - return self._request('hello', from_node=from_node, revoked=revoked) - - def memsample(self): - return self._request('memsample') - - def memdump(self, samples=10): - return self._request('memdump', samples=samples) - - def objgraph(self, type='Request', n=200, max_depth=10): - return self._request('objgraph', num=n, max_depth=max_depth, type=type) - - -class Control(object): - Mailbox = Mailbox - - def __init__(self, app=None): - self.app = app - self.mailbox = self.Mailbox('celery', type='fanout', accept=['json']) - - @cached_property - def inspect(self): - return self.app.subclass_with_self(Inspect, reverse='control.inspect') - - def purge(self, connection=None): - """Discard all waiting tasks. - - This will ignore all tasks waiting for execution, and they will - be deleted from the messaging server. - - :returns: the number of tasks discarded. 
- - """ - with self.app.connection_or_acquire(connection) as conn: - return self.app.amqp.TaskConsumer(conn).purge() - discard_all = purge - - def election(self, id, topic, action=None, connection=None): - self.broadcast('election', connection=connection, arguments={ - 'id': id, 'topic': topic, 'action': action, - }) - - def revoke(self, task_id, destination=None, terminate=False, - signal='SIGTERM', **kwargs): - """Tell all (or specific) workers to revoke a task by id. - - If a task is revoked, the workers will ignore the task and - not execute it after all. - - :param task_id: Id of the task to revoke. - :keyword terminate: Also terminate the process currently working - on the task (if any). - :keyword signal: Name of signal to send to process if terminate. - Default is TERM. - - See :meth:`broadcast` for supported keyword arguments. - - """ - return self.broadcast('revoke', destination=destination, - arguments={'task_id': task_id, - 'terminate': terminate, - 'signal': signal}, **kwargs) - - def ping(self, destination=None, timeout=1, **kwargs): - """Ping all (or specific) workers. - - Will return the list of answers. - - See :meth:`broadcast` for supported keyword arguments. - - """ - return self.broadcast('ping', reply=True, destination=destination, - timeout=timeout, **kwargs) - - def rate_limit(self, task_name, rate_limit, destination=None, **kwargs): - """Tell all (or specific) workers to set a new rate limit - for task by type. - - :param task_name: Name of task to change rate limit for. - :param rate_limit: The rate limit as tasks per second, or a rate limit - string (`'100/m'`, etc. - see :attr:`celery.task.base.Task.rate_limit` for - more information). - - See :meth:`broadcast` for supported keyword arguments. - - """ - return self.broadcast('rate_limit', destination=destination, - arguments={'task_name': task_name, - 'rate_limit': rate_limit}, - **kwargs) - - def add_consumer(self, queue, exchange=None, exchange_type='direct', - routing_key=None, options=None, **kwargs): - """Tell all (or specific) workers to start consuming from a new queue. - - Only the queue name is required as if only the queue is specified - then the exchange/routing key will be set to the same name ( - like automatic queues do). - - .. note:: - - This command does not respect the default queue/exchange - options in the configuration. - - :param queue: Name of queue to start consuming from. - :keyword exchange: Optional name of exchange. - :keyword exchange_type: Type of exchange (defaults to 'direct') - command to, when empty broadcast to all workers. - :keyword routing_key: Optional routing key. - :keyword options: Additional options as supported - by :meth:`kombu.entitiy.Queue.from_dict`. - - See :meth:`broadcast` for supported keyword arguments. - - """ - return self.broadcast( - 'add_consumer', - arguments=dict({'queue': queue, 'exchange': exchange, - 'exchange_type': exchange_type, - 'routing_key': routing_key}, **options or {}), - **kwargs - ) - - def cancel_consumer(self, queue, **kwargs): - """Tell all (or specific) workers to stop consuming from ``queue``. - - Supports the same keyword arguments as :meth:`broadcast`. - - """ - return self.broadcast( - 'cancel_consumer', arguments={'queue': queue}, **kwargs - ) - - def time_limit(self, task_name, soft=None, hard=None, **kwargs): - """Tell all (or specific) workers to set time limits for - a task by type. - - :param task_name: Name of task to change time limits for. - :keyword soft: New soft time limit (in seconds). 
- :keyword hard: New hard time limit (in seconds). - - Any additional keyword arguments are passed on to :meth:`broadcast`. - - """ - return self.broadcast( - 'time_limit', - arguments={'task_name': task_name, - 'hard': hard, 'soft': soft}, **kwargs) - - def enable_events(self, destination=None, **kwargs): - """Tell all (or specific) workers to enable events.""" - return self.broadcast('enable_events', {}, destination, **kwargs) - - def disable_events(self, destination=None, **kwargs): - """Tell all (or specific) workers to disable events.""" - return self.broadcast('disable_events', {}, destination, **kwargs) - - def pool_grow(self, n=1, destination=None, **kwargs): - """Tell all (or specific) workers to grow the pool by ``n``. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast('pool_grow', {'n': n}, destination, **kwargs) - - def pool_shrink(self, n=1, destination=None, **kwargs): - """Tell all (or specific) workers to shrink the pool by ``n``. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs) - - def autoscale(self, max, min, destination=None, **kwargs): - """Change worker(s) autoscale setting. - - Supports the same arguments as :meth:`broadcast`. - - """ - return self.broadcast( - 'autoscale', {'max': max, 'min': min}, destination, **kwargs) - - def broadcast(self, command, arguments=None, destination=None, - connection=None, reply=False, timeout=1, limit=None, - callback=None, channel=None, **extra_kwargs): - """Broadcast a control command to the celery workers. - - :param command: Name of command to send. - :param arguments: Keyword arguments for the command. - :keyword destination: If set, a list of the hosts to send the - command to, when empty broadcast to all workers. - :keyword connection: Custom broker connection to use, if not set, - a connection will be established automatically. - :keyword reply: Wait for and return the reply. - :keyword timeout: Timeout in seconds to wait for the reply. - :keyword limit: Limit number of replies. - :keyword callback: Callback called immediately for each reply - received. - - """ - with self.app.connection_or_acquire(connection) as conn: - arguments = dict(arguments or {}, **extra_kwargs) - return self.mailbox(conn)._broadcast( - command, arguments, destination, reply, timeout, - limit, callback, channel=channel, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py b/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py deleted file mode 100644 index aa7dd45..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py +++ /dev/null @@ -1,274 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.defaults - ~~~~~~~~~~~~~~~~~~~ - - Configuration introspection and defaults. 
- -""" -from __future__ import absolute_import - -import sys - -from collections import deque, namedtuple -from datetime import timedelta - -from celery.five import items -from celery.utils import strtobool -from celery.utils.functional import memoize - -__all__ = ['Option', 'NAMESPACES', 'flatten', 'find'] - -is_jython = sys.platform.startswith('java') -is_pypy = hasattr(sys, 'pypy_version_info') - -DEFAULT_POOL = 'prefork' -if is_jython: - DEFAULT_POOL = 'threads' -elif is_pypy: - if sys.pypy_version_info[0:3] < (1, 5, 0): - DEFAULT_POOL = 'solo' - else: - DEFAULT_POOL = 'prefork' - -DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml'] -DEFAULT_PROCESS_LOG_FMT = """ - [%(asctime)s: %(levelname)s/%(processName)s] %(message)s -""".strip() -DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s' -DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ -%(task_name)s[%(task_id)s]: %(message)s""" - -_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', - 'alt': 'BROKER_URL setting'} -_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', - 'alt': 'URL form of CELERY_RESULT_BACKEND'} - -searchresult = namedtuple('searchresult', ('namespace', 'key', 'type')) - - -# logging: processName first introduced in Py 2.6.2 (Issue #1644). -if sys.version_info < (2, 6, 2): - DEFAULT_PROCESS_LOG_FMT = DEFAULT_LOG_FMT - - -class Option(object): - alt = None - deprecate_by = None - remove_by = None - typemap = dict(string=str, int=int, float=float, any=lambda v: v, - bool=strtobool, dict=dict, tuple=tuple) - - def __init__(self, default=None, *args, **kwargs): - self.default = default - self.type = kwargs.get('type') or 'string' - for attr, value in items(kwargs): - setattr(self, attr, value) - - def to_python(self, value): - return self.typemap[self.type](value) - - def __repr__(self): - return '{0} default->{1!r}>'.format(self.type, - self.default) - -NAMESPACES = { - 'BROKER': { - 'URL': Option(None, type='string'), - 'CONNECTION_TIMEOUT': Option(4, type='float'), - 'CONNECTION_RETRY': Option(True, type='bool'), - 'CONNECTION_MAX_RETRIES': Option(100, type='int'), - 'FAILOVER_STRATEGY': Option(None, type='string'), - 'HEARTBEAT': Option(None, type='int'), - 'HEARTBEAT_CHECKRATE': Option(3.0, type='int'), - 'LOGIN_METHOD': Option(None, type='string'), - 'POOL_LIMIT': Option(10, type='int'), - 'USE_SSL': Option(False, type='bool'), - 'TRANSPORT': Option(type='string'), - 'TRANSPORT_OPTIONS': Option({}, type='dict'), - 'HOST': Option(type='string', **_BROKER_OLD), - 'PORT': Option(type='int', **_BROKER_OLD), - 'USER': Option(type='string', **_BROKER_OLD), - 'PASSWORD': Option(type='string', **_BROKER_OLD), - 'VHOST': Option(type='string', **_BROKER_OLD), - }, - 'CASSANDRA': { - 'COLUMN_FAMILY': Option(type='string'), - 'DETAILED_MODE': Option(False, type='bool'), - 'KEYSPACE': Option(type='string'), - 'READ_CONSISTENCY': Option(type='string'), - 'SERVERS': Option(type='list'), - 'WRITE_CONSISTENCY': Option(type='string'), - }, - 'CELERY': { - 'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'), - 'ACKS_LATE': Option(False, type='bool'), - 'ALWAYS_EAGER': Option(False, type='bool'), - 'ANNOTATIONS': Option(type='any'), - 'BROADCAST_QUEUE': Option('celeryctl'), - 'BROADCAST_EXCHANGE': Option('celeryctl'), - 'BROADCAST_EXCHANGE_TYPE': Option('fanout'), - 'CACHE_BACKEND': Option(), - 'CACHE_BACKEND_OPTIONS': Option({}, type='dict'), - 'CHORD_PROPAGATES': Option(True, type='bool'), - 'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'), - 'CREATE_MISSING_QUEUES': 
Option(True, type='bool'), - 'DEFAULT_RATE_LIMIT': Option(type='string'), - 'DISABLE_RATE_LIMITS': Option(False, type='bool'), - 'DEFAULT_ROUTING_KEY': Option('celery'), - 'DEFAULT_QUEUE': Option('celery'), - 'DEFAULT_EXCHANGE': Option('celery'), - 'DEFAULT_EXCHANGE_TYPE': Option('direct'), - 'DEFAULT_DELIVERY_MODE': Option(2, type='string'), - 'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'), - 'ENABLE_UTC': Option(True, type='bool'), - 'ENABLE_REMOTE_CONTROL': Option(True, type='bool'), - 'EVENT_SERIALIZER': Option('json'), - 'EVENT_QUEUE_EXPIRES': Option(None, type='float'), - 'EVENT_QUEUE_TTL': Option(None, type='float'), - 'IMPORTS': Option((), type='tuple'), - 'INCLUDE': Option((), type='tuple'), - 'IGNORE_RESULT': Option(False, type='bool'), - 'MAX_CACHED_RESULTS': Option(100, type='int'), - 'MESSAGE_COMPRESSION': Option(type='string'), - 'MONGODB_BACKEND_SETTINGS': Option(type='dict'), - 'REDIS_HOST': Option(type='string', **_REDIS_OLD), - 'REDIS_PORT': Option(type='int', **_REDIS_OLD), - 'REDIS_DB': Option(type='int', **_REDIS_OLD), - 'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD), - 'REDIS_MAX_CONNECTIONS': Option(type='int'), - 'RESULT_BACKEND': Option(type='string'), - 'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'), - 'RESULT_DB_TABLENAMES': Option(type='dict'), - 'RESULT_DBURI': Option(), - 'RESULT_ENGINE_OPTIONS': Option(type='dict'), - 'RESULT_EXCHANGE': Option('celeryresults'), - 'RESULT_EXCHANGE_TYPE': Option('direct'), - 'RESULT_SERIALIZER': Option('pickle'), - 'RESULT_PERSISTENT': Option(None, type='bool'), - 'ROUTES': Option(type='any'), - 'SEND_EVENTS': Option(False, type='bool'), - 'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'), - 'SEND_TASK_SENT_EVENT': Option(False, type='bool'), - 'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'), - 'TASK_PUBLISH_RETRY': Option(True, type='bool'), - 'TASK_PUBLISH_RETRY_POLICY': Option({ - 'max_retries': 3, - 'interval_start': 0, - 'interval_max': 1, - 'interval_step': 0.2}, type='dict'), - 'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'), - 'TASK_SERIALIZER': Option('pickle'), - 'TIMEZONE': Option(type='string'), - 'TRACK_STARTED': Option(False, type='bool'), - 'REDIRECT_STDOUTS': Option(True, type='bool'), - 'REDIRECT_STDOUTS_LEVEL': Option('WARNING'), - 'QUEUES': Option(type='dict'), - 'QUEUE_HA_POLICY': Option(None, type='string'), - 'SECURITY_KEY': Option(type='string'), - 'SECURITY_CERTIFICATE': Option(type='string'), - 'SECURITY_CERT_STORE': Option(type='string'), - 'WORKER_DIRECT': Option(False, type='bool'), - }, - 'CELERYD': { - 'AGENT': Option(None, type='string'), - 'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'), - 'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'), - 'CONCURRENCY': Option(0, type='int'), - 'TIMER': Option(type='string'), - 'TIMER_PRECISION': Option(1.0, type='float'), - 'FORCE_EXECV': Option(False, type='bool'), - 'HIJACK_ROOT_LOGGER': Option(True, type='bool'), - 'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'), - 'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT), - 'LOG_COLOR': Option(type='bool'), - 'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - 'MAX_TASKS_PER_CHILD': Option(type='int'), - 'POOL': Option(DEFAULT_POOL), - 'POOL_PUTLOCKS': Option(True, type='bool'), - 'POOL_RESTARTS': Option(False, type='bool'), - 'PREFETCH_MULTIPLIER': Option(4, type='int'), - 
'STATE_DB': Option(), - 'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT), - 'TASK_SOFT_TIME_LIMIT': Option(type='float'), - 'TASK_TIME_LIMIT': Option(type='float'), - 'WORKER_LOST_WAIT': Option(10.0, type='float') - }, - 'CELERYBEAT': { - 'SCHEDULE': Option({}, type='dict'), - 'SCHEDULER': Option('celery.beat:PersistentScheduler'), - 'SCHEDULE_FILENAME': Option('celerybeat-schedule'), - 'SYNC_EVERY': Option(0, type='int'), - 'MAX_LOOP_INTERVAL': Option(0, type='float'), - 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - }, - 'CELERYMON': { - 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', - alt='--loglevel argument'), - 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', - alt='--logfile argument'), - 'LOG_FORMAT': Option(DEFAULT_LOG_FMT), - }, - 'EMAIL': { - 'HOST': Option('localhost'), - 'PORT': Option(25, type='int'), - 'HOST_USER': Option(), - 'HOST_PASSWORD': Option(), - 'TIMEOUT': Option(2, type='float'), - 'USE_SSL': Option(False, type='bool'), - 'USE_TLS': Option(False, type='bool'), - }, - 'SERVER_EMAIL': Option('celery@localhost'), - 'ADMINS': Option((), type='tuple'), -} - - -def flatten(d, ns=''): - stack = deque([(ns, d)]) - while stack: - name, space = stack.popleft() - for key, value in items(space): - if isinstance(value, dict): - stack.append((name + key + '_', value)) - else: - yield name + key, value -DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES)) - - -def find_deprecated_settings(source): - from celery.utils import warn_deprecated - for name, opt in flatten(NAMESPACES): - if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): - warn_deprecated(description='The {0!r} setting'.format(name), - deprecation=opt.deprecate_by, - removal=opt.remove_by, - alternative='Use the {0.alt} instead'.format(opt)) - return source - - -@memoize(maxsize=None) -def find(name, namespace='celery'): - # - Try specified namespace first. - namespace = namespace.upper() - try: - return searchresult( - namespace, name.upper(), NAMESPACES[namespace][name.upper()], - ) - except KeyError: - # - Try all the other namespaces. - for ns, keys in items(NAMESPACES): - if ns.upper() == name.upper(): - return searchresult(None, ns, keys) - elif isinstance(keys, dict): - try: - return searchresult(ns, name.upper(), keys[name.upper()]) - except KeyError: - pass - # - See if name is a qualname last. - return searchresult(None, name.upper(), DEFAULTS[name.upper()]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/log.py b/thesisenv/lib/python3.6/site-packages/celery/app/log.py deleted file mode 100644 index 3d350e9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/log.py +++ /dev/null @@ -1,257 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.log - ~~~~~~~~~~~~~~ - - The Celery instances logging section: ``Celery.log``. - - Sets up logging for the worker and other programs, - redirects stdouts, colors log output, patches logging - related compatibility fixes, and so on. 
- -""" -from __future__ import absolute_import - -import logging -import os -import sys - -from logging.handlers import WatchedFileHandler - -from kombu.log import NullHandler -from kombu.utils.encoding import set_default_encoding_file - -from celery import signals -from celery._state import get_current_task -from celery.five import class_property, string_t -from celery.utils import isatty, node_format -from celery.utils.log import ( - get_logger, mlevel, - ColorFormatter, ensure_process_aware_logger, - LoggingProxy, get_multiprocessing_logger, - reset_multiprocessing_logger, -) -from celery.utils.term import colored - -__all__ = ['TaskFormatter', 'Logging'] - -MP_LOG = os.environ.get('MP_LOG', False) - - -class TaskFormatter(ColorFormatter): - - def format(self, record): - task = get_current_task() - if task and task.request: - record.__dict__.update(task_id=task.request.id, - task_name=task.name) - else: - record.__dict__.setdefault('task_name', '???') - record.__dict__.setdefault('task_id', '???') - return ColorFormatter.format(self, record) - - -class Logging(object): - #: The logging subsystem is only configured once per process. - #: setup_logging_subsystem sets this flag, and subsequent calls - #: will do nothing. - _setup = False - - def __init__(self, app): - self.app = app - self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL) - self.format = self.app.conf.CELERYD_LOG_FORMAT - self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT - self.colorize = self.app.conf.CELERYD_LOG_COLOR - - def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, - redirect_level='WARNING', colorize=None, hostname=None): - handled = self.setup_logging_subsystem( - loglevel, logfile, colorize=colorize, hostname=hostname, - ) - if not handled: - if redirect_stdouts: - self.redirect_stdouts(redirect_level) - os.environ.update( - CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', - CELERY_LOG_FILE=str(logfile) if logfile else '', - ) - return handled - - def redirect_stdouts(self, loglevel=None, name='celery.redirected'): - self.redirect_stdouts_to_logger( - get_logger(name), loglevel=loglevel - ) - os.environ.update( - CELERY_LOG_REDIRECT='1', - CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), - ) - - def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, - colorize=None, hostname=None, **kwargs): - if self.already_setup: - return - if logfile and hostname: - logfile = node_format(logfile, hostname) - self.already_setup = True - loglevel = mlevel(loglevel or self.loglevel) - format = format or self.format - colorize = self.supports_color(colorize, logfile) - reset_multiprocessing_logger() - ensure_process_aware_logger() - receivers = signals.setup_logging.send( - sender=None, loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - - if not receivers: - root = logging.getLogger() - - if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: - root.handlers = [] - get_logger('celery').handlers = [] - get_logger('celery.task').handlers = [] - get_logger('celery.redirected').handlers = [] - - # Configure root logger - self._configure_logger( - root, logfile, loglevel, format, colorize, **kwargs - ) - - # Configure the multiprocessing logger - self._configure_logger( - get_multiprocessing_logger(), - logfile, loglevel if MP_LOG else logging.ERROR, - format, colorize, **kwargs - ) - - signals.after_setup_logger.send( - sender=None, logger=root, - loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - - # then setup the root task logger. 
- self.setup_task_loggers(loglevel, logfile, colorize=colorize) - - try: - stream = logging.getLogger().handlers[0].stream - except (AttributeError, IndexError): - pass - else: - set_default_encoding_file(stream) - - # This is a hack for multiprocessing's fork+exec, so that - # logging before Process.run works. - logfile_name = logfile if isinstance(logfile, string_t) else '' - os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), - _MP_FORK_LOGFILE_=logfile_name, - _MP_FORK_LOGFORMAT_=format) - return receivers - - def _configure_logger(self, logger, logfile, loglevel, - format, colorize, **kwargs): - if logger is not None: - self.setup_handlers(logger, logfile, format, - colorize, **kwargs) - if loglevel: - logger.setLevel(loglevel) - - def setup_task_loggers(self, loglevel=None, logfile=None, format=None, - colorize=None, propagate=False, **kwargs): - """Setup the task logger. - - If `logfile` is not specified, then `sys.stderr` is used. - - Will return the base task logger object. - - """ - loglevel = mlevel(loglevel or self.loglevel) - format = format or self.task_format - colorize = self.supports_color(colorize, logfile) - - logger = self.setup_handlers( - get_logger('celery.task'), - logfile, format, colorize, - formatter=TaskFormatter, **kwargs - ) - logger.setLevel(loglevel) - # this is an int for some reason, better not question why. - logger.propagate = int(propagate) - signals.after_setup_task_logger.send( - sender=None, logger=logger, - loglevel=loglevel, logfile=logfile, - format=format, colorize=colorize, - ) - return logger - - def redirect_stdouts_to_logger(self, logger, loglevel=None, - stdout=True, stderr=True): - """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a - logging instance. - - :param logger: The :class:`logging.Logger` instance to redirect to. - :param loglevel: The loglevel redirected messages will be logged as. - - """ - proxy = LoggingProxy(logger, loglevel) - if stdout: - sys.stdout = proxy - if stderr: - sys.stderr = proxy - return proxy - - def supports_color(self, colorize=None, logfile=None): - colorize = self.colorize if colorize is None else colorize - if self.app.IS_WINDOWS: - # Windows does not support ANSI color codes. - return False - if colorize or colorize is None: - # Only use color if there is no active log file - # and stderr is an actual terminal. 
- return logfile is None and isatty(sys.stderr) - return colorize - - def colored(self, logfile=None, enabled=None): - return colored(enabled=self.supports_color(enabled, logfile)) - - def setup_handlers(self, logger, logfile, format, colorize, - formatter=ColorFormatter, **kwargs): - if self._is_configured(logger): - return logger - handler = self._detect_handler(logfile) - handler.setFormatter(formatter(format, use_color=colorize)) - logger.addHandler(handler) - return logger - - def _detect_handler(self, logfile=None): - """Create log handler with either a filename, an open stream - or :const:`None` (stderr).""" - logfile = sys.__stderr__ if logfile is None else logfile - if hasattr(logfile, 'write'): - return logging.StreamHandler(logfile) - return WatchedFileHandler(logfile) - - def _has_handler(self, logger): - if logger.handlers: - return any(not isinstance(h, NullHandler) for h in logger.handlers) - - def _is_configured(self, logger): - return self._has_handler(logger) and not getattr( - logger, '_rudimentary_setup', False) - - def setup_logger(self, name='celery', *args, **kwargs): - """Deprecated: No longer used.""" - self.setup_logging_subsystem(*args, **kwargs) - return logging.root - - def get_default_logger(self, name='celery', **kwargs): - return get_logger(name) - - @class_property - def already_setup(cls): - return cls._setup - - @already_setup.setter # noqa - def already_setup(cls, was_setup): - cls._setup = was_setup diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/registry.py b/thesisenv/lib/python3.6/site-packages/celery/app/registry.py deleted file mode 100644 index 7046554..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/registry.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.registry - ~~~~~~~~~~~~~~~~~~~ - - Registry of available tasks. - -""" -from __future__ import absolute_import - -import inspect - -from importlib import import_module - -from celery._state import get_current_app -from celery.exceptions import NotRegistered -from celery.five import items - -__all__ = ['TaskRegistry'] - - -class TaskRegistry(dict): - NotRegistered = NotRegistered - - def __missing__(self, key): - raise self.NotRegistered(key) - - def register(self, task): - """Register a task in the task registry. - - The task will be automatically instantiated if not already an - instance. - - """ - self[task.name] = inspect.isclass(task) and task() or task - - def unregister(self, name): - """Unregister task by name. - - :param name: name of the task to unregister, or a - :class:`celery.task.base.Task` with a valid `name` attribute. - - :raises celery.exceptions.NotRegistered: if the task has not - been registered. 
- - """ - try: - self.pop(getattr(name, 'name', name)) - except KeyError: - raise self.NotRegistered(name) - - # -- these methods are irrelevant now and will be removed in 4.0 - def regular(self): - return self.filter_types('regular') - - def periodic(self): - return self.filter_types('periodic') - - def filter_types(self, type): - return dict((name, task) for name, task in items(self) - if getattr(task, 'type', 'regular') == type) - - -def _unpickle_task(name): - return get_current_app().tasks[name] - - -def _unpickle_task_v2(name, module=None): - if module: - import_module(module) - return get_current_app().tasks[name] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/routes.py b/thesisenv/lib/python3.6/site-packages/celery/app/routes.py deleted file mode 100644 index b1e7314..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/routes.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.routes - ~~~~~~~~~~~~~ - - Contains utilities for working with task routers, - (:setting:`CELERY_ROUTES`). - -""" -from __future__ import absolute_import - -from celery.exceptions import QueueNotFound -from celery.five import string_t -from celery.utils import lpmerge -from celery.utils.functional import firstmethod, mlazy -from celery.utils.imports import instantiate - -__all__ = ['MapRoute', 'Router', 'prepare'] - -_first_route = firstmethod('route_for_task') - - -class MapRoute(object): - """Creates a router out of a :class:`dict`.""" - - def __init__(self, map): - self.map = map - - def route_for_task(self, task, *args, **kwargs): - try: - return dict(self.map[task]) - except KeyError: - pass - except ValueError: - return {'queue': self.map[task]} - - -class Router(object): - - def __init__(self, routes=None, queues=None, - create_missing=False, app=None): - self.app = app - self.queues = {} if queues is None else queues - self.routes = [] if routes is None else routes - self.create_missing = create_missing - - def route(self, options, task, args=(), kwargs={}): - options = self.expand_destination(options) # expands 'queue' - if self.routes: - route = self.lookup_route(task, args, kwargs) - if route: # expands 'queue' in route. - return lpmerge(self.expand_destination(route), options) - if 'queue' not in options: - options = lpmerge(self.expand_destination( - self.app.conf.CELERY_DEFAULT_QUEUE), options) - return options - - def expand_destination(self, route): - # Route can be a queue name: convenient for direct exchanges. - if isinstance(route, string_t): - queue, route = route, {} - else: - # can use defaults from configured queue, but override specific - # things (like the routing_key): great for topic exchanges. 
- queue = route.pop('queue', None) - - if queue: - try: - Q = self.queues[queue] # noqa - except KeyError: - raise QueueNotFound( - 'Queue {0!r} missing from CELERY_QUEUES'.format(queue)) - # needs to be declared by publisher - route['queue'] = Q - return route - - def lookup_route(self, task, args=None, kwargs=None): - return _first_route(self.routes, task, args, kwargs) - - -def prepare(routes): - """Expands the :setting:`CELERY_ROUTES` setting.""" - - def expand_route(route): - if isinstance(route, dict): - return MapRoute(route) - if isinstance(route, string_t): - return mlazy(instantiate, route) - return route - - if routes is None: - return () - if not isinstance(routes, (list, tuple)): - routes = (routes, ) - return [expand_route(route) for route in routes] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/task.py b/thesisenv/lib/python3.6/site-packages/celery/app/task.py deleted file mode 100644 index 3360005..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/task.py +++ /dev/null @@ -1,948 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.task - ~~~~~~~~~~~~~~~ - - Task Implementation: Task request context, and the base task class. - -""" -from __future__ import absolute_import - -import sys - -from billiard.einfo import ExceptionInfo - -from celery import current_app -from celery import states -from celery._state import _task_stack -from celery.canvas import signature -from celery.exceptions import MaxRetriesExceededError, Reject, Retry -from celery.five import class_property, items, with_metaclass -from celery.local import Proxy -from celery.result import EagerResult -from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise -from celery.utils.functional import mattrgetter, maybe_list -from celery.utils.imports import instantiate -from celery.utils.mail import ErrorMail - -from .annotations import resolve_all as resolve_all_annotations -from .registry import _unpickle_task_v2 -from .utils import appstr - -__all__ = ['Context', 'Task'] - -#: extracts attributes related to publishing a message from an object. 
-extract_exec_options = mattrgetter( - 'queue', 'routing_key', 'exchange', 'priority', 'expires', - 'serializer', 'delivery_mode', 'compression', 'time_limit', - 'soft_time_limit', 'immediate', 'mandatory', # imm+man is deprecated -) - -# We take __repr__ very seriously around here ;) -R_BOUND_TASK = '<class {0.__name__} of {app}{flags}>' -R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>' -R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>' -R_INSTANCE = '<@task: {0.name} of {app}{flags}>' - - -class _CompatShared(object): - - def __init__(self, name, cons): - self.name = name - self.cons = cons - - def __hash__(self): - return hash(self.name) - - def __repr__(self): - return '<OldTask: %r>' % (self.name, ) - - def __call__(self, app): - return self.cons(app) - - -def _strflags(flags, default=''): - if flags: - return ' ({0})'.format(', '.join(flags)) - return default - - -def _reprtask(task, fmt=None, flags=None): - flags = list(flags) if flags is not None else [] - flags.append('v2 compatible') if task.__v2_compat__ else None - if not fmt: - fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK - return fmt.format( - task, flags=_strflags(flags), - app=appstr(task._app) if task._app else None, - ) - - -class Context(object): - # Default context - logfile = None - loglevel = None - hostname = None - id = None - args = None - kwargs = None - retries = 0 - eta = None - expires = None - is_eager = False - headers = None - delivery_info = None - reply_to = None - correlation_id = None - taskset = None # compat alias to group - group = None - chord = None - utc = None - called_directly = True - callbacks = None - errbacks = None - timelimit = None - _children = None # see property - _protected = 0 - - def __init__(self, *args, **kwargs): - self.update(*args, **kwargs) - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def clear(self): - return self.__dict__.clear() - - def get(self, key, default=None): - return getattr(self, key, default) - - def __repr__(self): - return '<Context: {0!r}>'.format(vars(self)) - - @property - def children(self): - # children must be an empty list for every thread - if self._children is None: - self._children = [] - return self._children - - -class TaskType(type): - """Meta class for tasks. - - Automatically registers the task in the task registry (except - if the :attr:`Task.abstract` attribute is set). - - If no :attr:`Task.name` attribute is provided, then the name is generated - from the module and class name. - - """ - _creation_count = {} # used by old non-abstract task classes - - def __new__(cls, name, bases, attrs): - new = super(TaskType, cls).__new__ - task_module = attrs.get('__module__') or '__main__' - - # - Abstract class: abstract attribute should not be inherited. - abstract = attrs.pop('abstract', None) - if abstract or not attrs.get('autoregister', True): - return new(cls, name, bases, attrs) - - # The 'app' attribute is now a property, with the real app located - # in the '_app' attribute. Previously this was a regular attribute, - # so we should support classes defining it. - app = attrs.pop('_app', None) or attrs.pop('app', None) - - # Attempt to inherit app from one of the bases - if not isinstance(app, Proxy) and app is None: - for base in bases: - if getattr(base, '_app', None): - app = base._app - break - else: - app = current_app._get_current_object() - attrs['_app'] = app - - # - Automatically generate missing/empty name.
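# The Context class above is a plain attribute bag over __dict__; this
# miniature mirrors its update()/get() semantics so the behaviour can be
# exercised outside a worker. The request id is a made-up value.
class ContextSketch(object):
    retries = 0
    called_directly = True

    def __init__(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def get(self, key, default=None):
        return getattr(self, key, default)

ctx = ContextSketch({'id': 'aabbccdd-0000'}, retries=2)
assert ctx.get('id') == 'aabbccdd-0000'
assert ctx.retries == 2 and ctx.called_directly  # class defaults shine through
assert ctx.get('missing', 'fallback') == 'fallback'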
- task_name = attrs.get('name') - if not task_name: - attrs['name'] = task_name = gen_task_name(app, name, task_module) - - if not attrs.get('_decorated'): - # non decorated tasks must also be shared in case - # an app is created multiple times due to modules - # imported under multiple names. - # Hairy stuff, here to be compatible with 2.x. - # People should not use non-abstract task classes anymore, - # use the task decorator. - from celery._state import connect_on_app_finalize - unique_name = '.'.join([task_module, name]) - if unique_name not in cls._creation_count: - # the creation count is used as a safety - # so that the same task is not added recursively - # to the set of constructors. - cls._creation_count[unique_name] = 1 - connect_on_app_finalize(_CompatShared( - unique_name, - lambda app: TaskType.__new__(cls, name, bases, - dict(attrs, _app=app)), - )) - - # - Create and register class. - # Because of the way import happens (recursively) - # we may or may not be the first time the task tries to register - # with the framework. There should only be one class for each task - # name, so we always return the registered version. - tasks = app._tasks - if task_name not in tasks: - tasks.register(new(cls, name, bases, attrs)) - instance = tasks[task_name] - instance.bind(app) - return instance.__class__ - - def __repr__(cls): - return _reprtask(cls) - - -@with_metaclass(TaskType) -class Task(object): - """Task base class. - - When called tasks apply the :meth:`run` method. This method must - be defined by all tasks (that is unless the :meth:`__call__` method - is overridden). - - """ - __trace__ = None - __v2_compat__ = False # set by old base in celery.task.base - - ErrorMail = ErrorMail - MaxRetriesExceededError = MaxRetriesExceededError - - #: Execution strategy used, or the qualified name of one. - Strategy = 'celery.worker.strategy:default' - - #: This is the instance bound to if the task is a method of a class. - __self__ = None - - #: The application instance associated with this task class. - _app = None - - #: Name of the task. - name = None - - #: If :const:`True` the task is an abstract base class. - abstract = True - - #: If disabled the worker will not forward magic keyword arguments. - #: Deprecated and scheduled for removal in v4.0. - accept_magic_kwargs = False - - #: Maximum number of retries before giving up. If set to :const:`None`, - #: it will **never** stop retrying. - max_retries = 3 - - #: Default time in seconds before a retry of the task should be - #: executed. 3 minutes by default. - default_retry_delay = 3 * 60 - - #: Rate limit for this task type. Examples: :const:`None` (no rate - #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks - #: a minute),`'100/h'` (hundred tasks an hour) - rate_limit = None - - #: If enabled the worker will not store task state and return values - #: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT` - #: setting. - ignore_result = None - - #: If enabled the request will keep track of subtasks started by - #: this task, and this information will be sent with the result - #: (``result.children``). - trail = True - - #: If enabled the worker will send monitoring events related to - #: this task (but only if the worker is configured to send - #: task related events). - #: Note that this has no effect on the task-failure event case - #: where a task is not registered (as it will have no task class - #: to check this flag). 
- send_events = True - - #: When enabled errors will be stored even if the task is otherwise - #: configured to ignore results. - store_errors_even_if_ignored = None - - #: If enabled an email will be sent to :setting:`ADMINS` whenever a task - #: of this type fails. - send_error_emails = None - - #: The name of a serializer that are registered with - #: :mod:`kombu.serialization.registry`. Default is `'pickle'`. - serializer = None - - #: Hard time limit. - #: Defaults to the :setting:`CELERYD_TASK_TIME_LIMIT` setting. - time_limit = None - - #: Soft time limit. - #: Defaults to the :setting:`CELERYD_TASK_SOFT_TIME_LIMIT` setting. - soft_time_limit = None - - #: The result store backend used for this task. - backend = None - - #: If disabled this task won't be registered automatically. - autoregister = True - - #: If enabled the task will report its status as 'started' when the task - #: is executed by a worker. Disabled by default as the normal behaviour - #: is to not report that level of granularity. Tasks are either pending, - #: finished, or waiting to be retried. - #: - #: Having a 'started' status can be useful for when there are long - #: running tasks and there is a need to report which task is currently - #: running. - #: - #: The application default can be overridden using the - #: :setting:`CELERY_TRACK_STARTED` setting. - track_started = None - - #: When enabled messages for this task will be acknowledged **after** - #: the task has been executed, and not *just before* which is the - #: default behavior. - #: - #: Please note that this means the task may be executed twice if the - #: worker crashes mid execution (which may be acceptable for some - #: applications). - #: - #: The application default can be overridden with the - #: :setting:`CELERY_ACKS_LATE` setting. - acks_late = None - - #: Tuple of expected exceptions. - #: - #: These are errors that are expected in normal operation - #: and that should not be regarded as a real error by the worker. - #: Currently this means that the state will be updated to an error - #: state, but the worker will not log the event as an error. - throws = () - - #: Default task expiry time. - expires = None - - #: Some may expect a request to exist even if the task has not been - #: called. This should probably be deprecated. - _default_request = None - - _exec_options = None - - __bound__ = False - - from_config = ( - ('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'), - ('serializer', 'CELERY_TASK_SERIALIZER'), - ('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'), - ('track_started', 'CELERY_TRACK_STARTED'), - ('acks_late', 'CELERY_ACKS_LATE'), - ('ignore_result', 'CELERY_IGNORE_RESULT'), - ('store_errors_even_if_ignored', - 'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'), - ) - - _backend = None # set by backend property. - - __bound__ = False - - # - Tasks are lazily bound, so that configuration is not set - # - until the task is actually used - - @classmethod - def bind(self, app): - was_bound, self.__bound__ = self.__bound__, True - self._app = app - conf = app.conf - self._exec_options = None # clear option cache - - for attr_name, config_name in self.from_config: - if getattr(self, attr_name, None) is None: - setattr(self, attr_name, conf[config_name]) - if self.accept_magic_kwargs is None: - self.accept_magic_kwargs = app.accept_magic_kwargs - - # decorate with annotations from config. 
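# What bind() above does with from_config, in isolation: any task
# attribute left at None is filled in from the app configuration. The
# setting names are the real Celery 3.1 keys; conf is a stand-in dict.
conf = {'CELERY_TASK_SERIALIZER': 'json', 'CELERY_ACKS_LATE': False}

class TaskSketch(object):
    serializer = None   # unset: will be taken from configuration
    acks_late = None
    from_config = (('serializer', 'CELERY_TASK_SERIALIZER'),
                   ('acks_late', 'CELERY_ACKS_LATE'))

for attr_name, config_name in TaskSketch.from_config:
    if getattr(TaskSketch, attr_name, None) is None:
        setattr(TaskSketch, attr_name, conf[config_name])

assert TaskSketch.serializer == 'json'
assert TaskSketch.acks_late is False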
- if not was_bound: - self.annotate() - - from celery.utils.threads import LocalStack - self.request_stack = LocalStack() - - # PeriodicTask uses this to add itself to the PeriodicTask schedule. - self.on_bound(app) - - return app - - @classmethod - def on_bound(self, app): - """This method can be defined to do additional actions when the - task class is bound to an app.""" - pass - - @classmethod - def _get_app(self): - if self._app is None: - self._app = current_app - if not self.__bound__: - # The app property's __set__ method is not called - # if Task.app is set (on the class), so must bind on use. - self.bind(self._app) - return self._app - app = class_property(_get_app, bind) - - @classmethod - def annotate(self): - for d in resolve_all_annotations(self.app.annotations, self): - for key, value in items(d): - if key.startswith('@'): - self.add_around(key[1:], value) - else: - setattr(self, key, value) - - @classmethod - def add_around(self, attr, around): - orig = getattr(self, attr) - if getattr(orig, '__wrapped__', None): - orig = orig.__wrapped__ - meth = around(orig) - meth.__wrapped__ = orig - setattr(self, attr, meth) - - def __call__(self, *args, **kwargs): - _task_stack.push(self) - self.push_request() - try: - # add self if this is a bound task - if self.__self__ is not None: - return self.run(self.__self__, *args, **kwargs) - return self.run(*args, **kwargs) - finally: - self.pop_request() - _task_stack.pop() - - def __reduce__(self): - # - tasks are pickled into the name of the task only, and the reciever - # - simply grabs it from the local registry. - # - in later versions the module of the task is also included, - # - and the receiving side tries to import that module so that - # - it will work even if the task has not been registered. - mod = type(self).__module__ - mod = mod if mod and mod in sys.modules else None - return (_unpickle_task_v2, (self.name, mod), None) - - def run(self, *args, **kwargs): - """The body of the task executed by workers.""" - raise NotImplementedError('Tasks must define the run method.') - - def start_strategy(self, app, consumer, **kwargs): - return instantiate(self.Strategy, self, app, consumer, **kwargs) - - def delay(self, *args, **kwargs): - """Star argument version of :meth:`apply_async`. - - Does not support the extra options enabled by :meth:`apply_async`. - - :param \*args: positional arguments passed on to the task. - :param \*\*kwargs: keyword arguments passed on to the task. - - :returns :class:`celery.result.AsyncResult`: - - """ - return self.apply_async(args, kwargs) - - def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, - link=None, link_error=None, **options): - """Apply tasks asynchronously by sending a message. - - :keyword args: The positional arguments to pass on to the - task (a :class:`list` or :class:`tuple`). - - :keyword kwargs: The keyword arguments to pass on to the - task (a :class:`dict`) - - :keyword countdown: Number of seconds into the future that the - task should execute. Defaults to immediate - execution. - - :keyword eta: A :class:`~datetime.datetime` object describing - the absolute time and date of when the task should - be executed. May not be specified if `countdown` - is also supplied. - - :keyword expires: Either a :class:`int`, describing the number of - seconds, or a :class:`~datetime.datetime` object - that describes the absolute time and date of when - the task should expire. The task will not be - executed after the expiration time. 
- - :keyword connection: Re-use existing broker connection instead - of establishing a new one. - - :keyword retry: If enabled sending of the task message will be retried - in the event of connection loss or failure. Default - is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` - setting. Note that you need to handle the - producer/connection manually for this to work. - - :keyword retry_policy: Override the retry policy used. See the - :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` - setting. - - :keyword routing_key: Custom routing key used to route the task to a - worker server. If in combination with a - ``queue`` argument only used to specify custom - routing keys to topic exchanges. - - :keyword queue: The queue to route the task to. This must be a key - present in :setting:`CELERY_QUEUES`, or - :setting:`CELERY_CREATE_MISSING_QUEUES` must be - enabled. See :ref:`guide-routing` for more - information. - - :keyword exchange: Named custom exchange to send the task to. - Usually not used in combination with the ``queue`` - argument. - - :keyword priority: The task priority, a number between 0 and 9. - Defaults to the :attr:`priority` attribute. - - :keyword serializer: A string identifying the default - serialization method to use. Can be `pickle`, - `json`, `yaml`, `msgpack` or any custom - serialization method that has been registered - with :mod:`kombu.serialization.registry`. - Defaults to the :attr:`serializer` attribute. - - :keyword compression: A string identifying the compression method - to use. Can be one of ``zlib``, ``bzip2``, - or any custom compression methods registered with - :func:`kombu.compression.register`. Defaults to - the :setting:`CELERY_MESSAGE_COMPRESSION` - setting. - :keyword link: A single, or a list of tasks to apply if the - task exits successfully. - :keyword link_error: A single, or a list of tasks to apply - if an error occurs while executing the task. - - :keyword producer: :class:~@amqp.TaskProducer` instance to use. - - :keyword add_to_parent: If set to True (default) and the task - is applied while executing another task, then the result - will be appended to the parent tasks ``request.children`` - attribute. Trailing can also be disabled by default using the - :attr:`trail` attribute - - :keyword publisher: Deprecated alias to ``producer``. - - :keyword headers: Message headers to be sent in the - task (a :class:`dict`) - - :rtype :class:`celery.result.AsyncResult`: if - :setting:`CELERY_ALWAYS_EAGER` is not set, otherwise - :class:`celery.result.EagerResult`. - - Also supports all keyword arguments supported by - :meth:`kombu.Producer.publish`. - - .. note:: - If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will - be replaced by a local :func:`apply` call instead. - - """ - app = self._get_app() - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, task_id=task_id or uuid(), - link=link, link_error=link_error, **options) - # add 'self' if this is a "task_method". 
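# A hedged usage sketch for the keywords documented above (Celery 3.1
# API): countdown, expires, queue and serializer as described in the
# docstring. The broker URL and the add() task are placeholders; sending
# requires a reachable broker.
from datetime import datetime, timedelta
from celery import Celery

app = Celery('proj', broker='amqp://guest@localhost//')

@app.task
def add(x, y):
    return x + y

result = add.apply_async(
    (2, 2),
    countdown=10,                                   # run ~10s from now
    expires=datetime.utcnow() + timedelta(days=1),  # drop if still queued tomorrow
    queue='feeds',                                  # resolved against CELERY_QUEUES
    serializer='json',
)
print(result.id)  # AsyncResult uuid; .get() would block on the result backend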
- if self.__self__ is not None: - args = args if isinstance(args, tuple) else tuple(args or ()) - args = (self.__self__, ) + args - return app.send_task( - self.name, args, kwargs, task_id=task_id, producer=producer, - link=link, link_error=link_error, result_cls=self.AsyncResult, - **dict(self._get_exec_options(), **options) - ) - - def subtask_from_request(self, request=None, args=None, kwargs=None, - queue=None, **extra_options): - request = self.request if request is None else request - args = request.args if args is None else args - kwargs = request.kwargs if kwargs is None else kwargs - limit_hard, limit_soft = request.timelimit or (None, None) - options = { - 'task_id': request.id, - 'link': request.callbacks, - 'link_error': request.errbacks, - 'group_id': request.group, - 'chord': request.chord, - 'soft_time_limit': limit_soft, - 'time_limit': limit_hard, - 'reply_to': request.reply_to, - 'headers': request.headers, - } - options.update( - {'queue': queue} if queue else (request.delivery_info or {}) - ) - return self.subtask(args, kwargs, options, type=self, **extra_options) - - def retry(self, args=None, kwargs=None, exc=None, throw=True, - eta=None, countdown=None, max_retries=None, **options): - """Retry the task. - - :param args: Positional arguments to retry with. - :param kwargs: Keyword arguments to retry with. - :keyword exc: Custom exception to report when the max restart - limit has been exceeded (default: - :exc:`~@MaxRetriesExceededError`). - - If this argument is set and retry is called while - an exception was raised (``sys.exc_info()`` is set) - it will attempt to reraise the current exception. - - If no exception was raised it will raise the ``exc`` - argument provided. - :keyword countdown: Time in seconds to delay the retry for. - :keyword eta: Explicit time and date to run the retry at - (must be a :class:`~datetime.datetime` instance). - :keyword max_retries: If set, overrides the default retry limit for - this execution. Changes to this parameter do not propagate to - subsequent task retry attempts. A value of :const:`None`, means - "use the default", so if you want infinite retries you would - have to set the :attr:`max_retries` attribute of the task to - :const:`None` first. - :keyword time_limit: If set, overrides the default time limit. - :keyword soft_time_limit: If set, overrides the default soft - time limit. - :keyword \*\*options: Any extra options to pass on to - meth:`apply_async`. - :keyword throw: If this is :const:`False`, do not raise the - :exc:`~@Retry` exception, - that tells the worker to mark the task as being - retried. Note that this means the task will be - marked as failed if the task raises an exception, - or successful if it returns. - - :raises celery.exceptions.Retry: To tell the worker that - the task has been re-sent for retry. This always happens, - unless the `throw` keyword argument has been explicitly set - to :const:`False`, and is considered normal operation. - - **Example** - - .. code-block:: python - - >>> from imaginary_twitter_lib import Twitter - >>> from proj.celery import app - - >>> @app.task(bind=True) - ... def tweet(self, auth, message): - ... twitter = Twitter(oauth=auth) - ... try: - ... twitter.post_status_update(message) - ... except twitter.FailWhale as exc: - ... # Retry in 5 minutes. - ... 
raise self.retry(countdown=60 * 5, exc=exc) - - Although the task will never return above as `retry` raises an - exception to notify the worker, we use `raise` in front of the retry - to convey that the rest of the block will not be executed. - - """ - request = self.request - retries = request.retries + 1 - max_retries = self.max_retries if max_retries is None else max_retries - - # Not in worker or emulated by (apply/always_eager), - # so just raise the original exception. - if request.called_directly: - maybe_reraise() # raise orig stack if PyErr_Occurred - raise exc or Retry('Task can be retried', None) - - if not eta and countdown is None: - countdown = self.default_retry_delay - - is_eager = request.is_eager - S = self.subtask_from_request( - request, args, kwargs, - countdown=countdown, eta=eta, retries=retries, - **options - ) - - if max_retries is not None and retries > max_retries: - if exc: - # first try to reraise the original exception - maybe_reraise() - # or if not in an except block then raise the custom exc. - raise exc - raise self.MaxRetriesExceededError( - "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( - self.name, request.id, S.args, S.kwargs)) - - ret = Retry(exc=exc, when=eta or countdown) - - if is_eager: - # if task was executed eagerly using apply(), - # then the retry must also be executed eagerly. - S.apply().get() - return ret - - try: - S.apply_async() - except Exception as exc: - raise Reject(exc, requeue=False) - if throw: - raise ret - return ret - - def apply(self, args=None, kwargs=None, - link=None, link_error=None, **options): - """Execute this task locally, by blocking until the task returns. - - :param args: positional arguments passed on to the task. - :param kwargs: keyword arguments passed on to the task. - :keyword throw: Re-raise task exceptions. Defaults to - the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` - setting. - - :rtype :class:`celery.result.EagerResult`: - - """ - # trace imports Task, so need to import inline. - from celery.app.trace import eager_trace_task - - app = self._get_app() - args = args or () - # add 'self' if this is a bound method. - if self.__self__ is not None: - args = (self.__self__, ) + tuple(args) - kwargs = kwargs or {} - task_id = options.get('task_id') or uuid() - retries = options.get('retries', 0) - throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', - options.pop('throw', None)) - - # Make sure we get the task instance, not class. 
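# The retry bookkeeping above, reduced to its decision rule: each call
# re-sends the task with retries+1, and once that exceeds max_retries
# the MaxRetriesExceededError branch is taken instead of re-sending.
def gives_up(current_retries, max_retries=3):
    retries = current_retries + 1
    return max_retries is not None and retries > max_retries

assert not gives_up(2)   # 3rd retry of max_retries=3 is still sent
assert gives_up(3)       # 4th attempt exceeds the limit
assert not gives_up(10**6, max_retries=None)  # None means retry forever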
- task = app._tasks[self.name] - - request = {'id': task_id, - 'retries': retries, - 'is_eager': True, - 'logfile': options.get('logfile'), - 'loglevel': options.get('loglevel', 0), - 'callbacks': maybe_list(link), - 'errbacks': maybe_list(link_error), - 'headers': options.get('headers'), - 'delivery_info': {'is_eager': True}} - if self.accept_magic_kwargs: - default_kwargs = {'task_name': task.name, - 'task_id': task_id, - 'task_retries': retries, - 'task_is_eager': True, - 'logfile': options.get('logfile'), - 'loglevel': options.get('loglevel', 0), - 'delivery_info': {'is_eager': True}} - supported_keys = fun_takes_kwargs(task.run, default_kwargs) - extend_with = dict((key, val) - for key, val in items(default_kwargs) - if key in supported_keys) - kwargs.update(extend_with) - - tb = None - retval, info = eager_trace_task(task, task_id, args, kwargs, - app=self._get_app(), - request=request, propagate=throw) - if isinstance(retval, ExceptionInfo): - retval, tb = retval.exception, retval.traceback - state = states.SUCCESS if info is None else info.state - return EagerResult(task_id, retval, state, traceback=tb) - - def AsyncResult(self, task_id, **kwargs): - """Get AsyncResult instance for this kind of task. - - :param task_id: Task id to get result for. - - """ - return self._get_app().AsyncResult(task_id, backend=self.backend, - task_name=self.name, **kwargs) - - def subtask(self, args=None, *starargs, **starkwargs): - """Return :class:`~celery.signature` object for - this task, wrapping arguments and execution options - for a single task invocation.""" - starkwargs.setdefault('app', self.app) - return signature(self, args, *starargs, **starkwargs) - - def s(self, *args, **kwargs): - """``.s(*a, **k) -> .subtask(a, k)``""" - return self.subtask(args, kwargs) - - def si(self, *args, **kwargs): - """``.si(*a, **k) -> .subtask(a, k, immutable=True)``""" - return self.subtask(args, kwargs, immutable=True) - - def chunks(self, it, n): - """Creates a :class:`~celery.canvas.chunks` task for this task.""" - from celery import chunks - return chunks(self.s(), it, n, app=self.app) - - def map(self, it): - """Creates a :class:`~celery.canvas.xmap` task from ``it``.""" - from celery import xmap - return xmap(self.s(), it, app=self.app) - - def starmap(self, it): - """Creates a :class:`~celery.canvas.xstarmap` task from ``it``.""" - from celery import xstarmap - return xstarmap(self.s(), it, app=self.app) - - def send_event(self, type_, **fields): - req = self.request - with self.app.events.default_dispatcher(hostname=req.hostname) as d: - return d.send(type_, uuid=req.id, **fields) - - def update_state(self, task_id=None, state=None, meta=None): - """Update task state. - - :keyword task_id: Id of the task to update, defaults to the - id of the current task - :keyword state: New state (:class:`str`). - :keyword meta: State metadata (:class:`dict`). - - - - """ - if task_id is None: - task_id = self.request.id - self.backend.store_result(task_id, meta, state) - - def on_success(self, retval, task_id, args, kwargs): - """Success handler. - - Run by the worker if the task executes successfully. - - :param retval: The return value of the task. - :param task_id: Unique id of the executed task. - :param args: Original arguments for the executed task. - :param kwargs: Original keyword arguments for the executed task. - - The return value of this handler is ignored. - - """ - pass - - def on_retry(self, exc, task_id, args, kwargs, einfo): - """Retry handler. 
- - This is run by the worker when the task is to be retried. - - :param exc: The exception sent to :meth:`retry`. - :param task_id: Unique id of the retried task. - :param args: Original arguments for the retried task. - :param kwargs: Original keyword arguments for the retried task. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback. - - The return value of this handler is ignored. - - """ - pass - - def on_failure(self, exc, task_id, args, kwargs, einfo): - """Error handler. - - This is run by the worker when the task fails. - - :param exc: The exception raised by the task. - :param task_id: Unique id of the failed task. - :param args: Original arguments for the task that failed. - :param kwargs: Original keyword arguments for the task - that failed. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback. - - The return value of this handler is ignored. - - """ - pass - - def after_return(self, status, retval, task_id, args, kwargs, einfo): - """Handler called after the task returns. - - :param status: Current task state. - :param retval: Task return value/exception. - :param task_id: Unique id of the task. - :param args: Original arguments for the task. - :param kwargs: Original keyword arguments for the task. - - :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` - instance, containing the traceback (if any). - - The return value of this handler is ignored. - - """ - pass - - def send_error_email(self, context, exc, **kwargs): - if self.send_error_emails and \ - not getattr(self, 'disable_error_emails', None): - self.ErrorMail(self, **kwargs).send(context, exc) - - def add_trail(self, result): - if self.trail: - self.request.children.append(result) - return result - - def push_request(self, *args, **kwargs): - self.request_stack.push(Context(*args, **kwargs)) - - def pop_request(self): - self.request_stack.pop() - - def __repr__(self): - """`repr(task)`""" - return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE) - - def _get_request(self): - """Get current request object.""" - req = self.request_stack.top - if req is None: - # task was not called, but some may still expect a request - # to be there, perhaps that should be deprecated. - if self._default_request is None: - self._default_request = Context() - return self._default_request - return req - request = property(_get_request) - - def _get_exec_options(self): - if self._exec_options is None: - self._exec_options = extract_exec_options(self) - return self._exec_options - - @property - def backend(self): - backend = self._backend - if backend is None: - return self.app.backend - return backend - - @backend.setter - def backend(self, value): # noqa - self._backend = value - - @property - def __name__(self): - return self.__class__.__name__ -BaseTask = Task # compat alias diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/trace.py b/thesisenv/lib/python3.6/site-packages/celery/app/trace.py deleted file mode 100644 index feea0e8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/trace.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.trace - ~~~~~~~~~~~~~~~~ - - This module defines how the task execution is traced: - errors are recorded, handlers are applied and so on. - -""" -from __future__ import absolute_import - -# ## --- -# This is the heart of the worker, the inner loop so to speak. 
-# It used to be split up into nice little classes and methods, -# but in the end it only resulted in bad performance and horrible tracebacks, -# so instead we now use one closure per task class. - -import os -import socket -import sys - -from warnings import warn - -from billiard.einfo import ExceptionInfo -from kombu.exceptions import EncodeError -from kombu.utils import kwdict - -from celery import current_app, group -from celery import states, signals -from celery._state import _task_stack -from celery.app import set_default_app -from celery.app.task import Task as BaseTask, Context -from celery.exceptions import Ignore, Reject, Retry -from celery.utils.log import get_logger -from celery.utils.objects import mro_lookup -from celery.utils.serialization import ( - get_pickleable_exception, - get_pickleable_etype, -) - -__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task', - 'setup_worker_optimizations', 'reset_worker_optimizations'] - -_logger = get_logger(__name__) - -send_prerun = signals.task_prerun.send -send_postrun = signals.task_postrun.send -send_success = signals.task_success.send -STARTED = states.STARTED -SUCCESS = states.SUCCESS -IGNORED = states.IGNORED -REJECTED = states.REJECTED -RETRY = states.RETRY -FAILURE = states.FAILURE -EXCEPTION_STATES = states.EXCEPTION_STATES -IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED]) - -#: set by :func:`setup_worker_optimizations` -_tasks = None -_patched = {} - - -def task_has_custom(task, attr): - """Return true if the task or one of its bases - defines ``attr`` (excluding the one in BaseTask).""" - return mro_lookup(task.__class__, attr, stop=(BaseTask, object), - monkey_patched=['celery.app.task']) - - -class TraceInfo(object): - __slots__ = ('state', 'retval') - - def __init__(self, state, retval=None): - self.state = state - self.retval = retval - - def handle_error_state(self, task, eager=False): - store_errors = not eager - if task.ignore_result: - store_errors = task.store_errors_even_if_ignored - - return { - RETRY: self.handle_retry, - FAILURE: self.handle_failure, - }[self.state](task, store_errors=store_errors) - - def handle_retry(self, task, store_errors=True): - """Handle retry exception.""" - # the exception raised is the Retry semi-predicate, - # and it's exc' attribute is the original exception raised (if any). 
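# handle_error_state above dispatches on the trace state and decides
# whether errors are written to the backend; the same logic with stub
# handlers, so the eager/ignore_result interplay can be seen directly.
RETRY, FAILURE = 'RETRY', 'FAILURE'

def handle_error_state(state, ignore_result=False,
                       store_errors_even_if_ignored=False, eager=False):
    store_errors = not eager
    if ignore_result:
        store_errors = store_errors_even_if_ignored
    handlers = {RETRY: lambda store: ('retried', store),
                FAILURE: lambda store: ('failed', store)}
    return handlers[state](store_errors)

assert handle_error_state(FAILURE) == ('failed', True)
assert handle_error_state(FAILURE, eager=True) == ('failed', False)
assert handle_error_state(RETRY, ignore_result=True) == ('retried', False)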
- req = task.request - type_, _, tb = sys.exc_info() - try: - reason = self.retval - einfo = ExceptionInfo((type_, reason, tb)) - if store_errors: - task.backend.mark_as_retry( - req.id, reason.exc, einfo.traceback, request=req, - ) - task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) - signals.task_retry.send(sender=task, request=req, - reason=reason, einfo=einfo) - return einfo - finally: - del(tb) - - def handle_failure(self, task, store_errors=True): - """Handle exception.""" - req = task.request - type_, _, tb = sys.exc_info() - try: - exc = self.retval - einfo = ExceptionInfo() - einfo.exception = get_pickleable_exception(einfo.exception) - einfo.type = get_pickleable_etype(einfo.type) - if store_errors: - task.backend.mark_as_failure( - req.id, exc, einfo.traceback, request=req, - ) - task.on_failure(exc, req.id, req.args, req.kwargs, einfo) - signals.task_failure.send(sender=task, task_id=req.id, - exception=exc, args=req.args, - kwargs=req.kwargs, - traceback=tb, - einfo=einfo) - return einfo - finally: - del(tb) - - -def build_tracer(name, task, loader=None, hostname=None, store_errors=True, - Info=TraceInfo, eager=False, propagate=False, app=None, - IGNORE_STATES=IGNORE_STATES): - """Return a function that traces task execution; catches all - exceptions and updates result backend with the state and result - - If the call was successful, it saves the result to the task result - backend, and sets the task status to `"SUCCESS"`. - - If the call raises :exc:`~@Retry`, it extracts - the original exception, uses that as the result and sets the task state - to `"RETRY"`. - - If the call results in an exception, it saves the exception as the task - result, and sets the task state to `"FAILURE"`. - - Return a function that takes the following arguments: - - :param uuid: The id of the task. - :param args: List of positional args to pass on to the function. - :param kwargs: Keyword arguments mapping to pass on to the function. - :keyword request: Request dict. - - """ - # If the task doesn't define a custom __call__ method - # we optimize it away by simply calling the run method directly, - # saving the extra method call and a line less in the stack trace. 
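# The contract from the build_tracer docstring as a toy: run the task
# body and fold the outcome into a final state, the way the unrolled
# closure below does with SUCCESS/RETRY/FAILURE.
class RetryToy(Exception):
    """Stands in for celery.exceptions.Retry."""

def trace(fun, *args, **kwargs):
    try:
        retval = fun(*args, **kwargs)
    except RetryToy as exc:
        return 'RETRY', exc       # original exception becomes the result
    except Exception as exc:
        return 'FAILURE', exc     # exception is saved as the task result
    return 'SUCCESS', retval      # return value goes to the result backend

def ok():
    return 42

def boom():
    raise ValueError('x')

assert trace(ok) == ('SUCCESS', 42)
assert trace(boom)[0] == 'FAILURE'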
- fun = task if task_has_custom(task, '__call__') else task.run - - loader = loader or app.loader - backend = task.backend - ignore_result = task.ignore_result - track_started = task.track_started - track_started = not eager and (task.track_started and not ignore_result) - publish_result = not eager and not ignore_result - hostname = hostname or socket.gethostname() - - loader_task_init = loader.on_task_init - loader_cleanup = loader.on_process_cleanup - - task_on_success = None - task_after_return = None - if task_has_custom(task, 'on_success'): - task_on_success = task.on_success - if task_has_custom(task, 'after_return'): - task_after_return = task.after_return - - store_result = backend.store_result - backend_cleanup = backend.process_cleanup - - pid = os.getpid() - - request_stack = task.request_stack - push_request = request_stack.push - pop_request = request_stack.pop - push_task = _task_stack.push - pop_task = _task_stack.pop - on_chord_part_return = backend.on_chord_part_return - - prerun_receivers = signals.task_prerun.receivers - postrun_receivers = signals.task_postrun.receivers - success_receivers = signals.task_success.receivers - - from celery import canvas - signature = canvas.maybe_signature # maybe_ does not clone if already - - def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True): - if propagate: - raise - I = Info(state, exc) - R = I.handle_error_state(task, eager=eager) - if call_errbacks: - group( - [signature(errback, app=app) - for errback in request.errbacks or []], app=app, - ).apply_async((uuid, )) - return I, R, I.state, I.retval - - def trace_task(uuid, args, kwargs, request=None): - # R - is the possibly prepared return value. - # I - is the Info object. - # retval - is the always unmodified return value. - # state - is the resulting task state. - - # This function is very long because we have unrolled all the calls - # for performance reasons, and because the function is so long - # we want the main variables (I, and R) to stand out visually from the - # the rest of the variables, so breaking PEP8 is worth it ;) - R = I = retval = state = None - kwargs = kwdict(kwargs) - try: - push_task(task) - task_request = Context(request or {}, args=args, - called_directly=False, kwargs=kwargs) - push_request(task_request) - try: - # -*- PRE -*- - if prerun_receivers: - send_prerun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs) - loader_task_init(uuid, task) - if track_started: - store_result( - uuid, {'pid': pid, 'hostname': hostname}, STARTED, - request=task_request, - ) - - # -*- TRACE -*- - try: - R = retval = fun(*args, **kwargs) - state = SUCCESS - except Reject as exc: - I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) - state, retval = I.state, I.retval - except Ignore as exc: - I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) - state, retval = I.state, I.retval - except Retry as exc: - I, R, state, retval = on_error( - task_request, exc, uuid, RETRY, call_errbacks=False, - ) - except Exception as exc: - I, R, state, retval = on_error(task_request, exc, uuid) - except BaseException as exc: - raise - else: - try: - # callback tasks must be applied before the result is - # stored, so that result.children is populated. 
- - # groups are called inline and will store trail - # separately, so need to call them separately - # so that the trail's not added multiple times :( - # (Issue #1936) - callbacks = task.request.callbacks - if callbacks: - if len(task.request.callbacks) > 1: - sigs, groups = [], [] - for sig in callbacks: - sig = signature(sig, app=app) - if isinstance(sig, group): - groups.append(sig) - else: - sigs.append(sig) - for group_ in groups: - group_.apply_async((retval, )) - if sigs: - group(sigs).apply_async((retval, )) - else: - signature(callbacks[0], app=app).delay(retval) - if publish_result: - store_result( - uuid, retval, SUCCESS, request=task_request, - ) - except EncodeError as exc: - I, R, state, retval = on_error(task_request, exc, uuid) - else: - if task_on_success: - task_on_success(retval, uuid, args, kwargs) - if success_receivers: - send_success(sender=task, result=retval) - - # -* POST *- - if state not in IGNORE_STATES: - if task_request.chord: - on_chord_part_return(task, state, R) - if task_after_return: - task_after_return( - state, retval, uuid, args, kwargs, None, - ) - finally: - try: - if postrun_receivers: - send_postrun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs, - retval=retval, state=state) - finally: - pop_task() - pop_request() - if not eager: - try: - backend_cleanup() - loader_cleanup() - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except Exception as exc: - _logger.error('Process cleanup failed: %r', exc, - exc_info=True) - except MemoryError: - raise - except Exception as exc: - if eager: - raise - R = report_internal_error(task, exc) - return R, I - - return trace_task - - -def trace_task(task, uuid, args, kwargs, request={}, **opts): - try: - if task.__trace__ is None: - task.__trace__ = build_tracer(task.name, task, **opts) - return task.__trace__(uuid, args, kwargs, request)[0] - except Exception as exc: - return report_internal_error(task, exc) - - -def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): - app = app or current_app - return trace_task(app.tasks[name], - uuid, args, kwargs, request, app=app, **opts) -trace_task_ret = _trace_task_ret - - -def _fast_trace_task(task, uuid, args, kwargs, request={}): - # setup_worker_optimizations will point trace_task_ret to here, - # so this is the function used in the worker. - return _tasks[task].__trace__(uuid, args, kwargs, request)[0] - - -def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): - opts.setdefault('eager', True) - return build_tracer(task.name, task, **opts)( - uuid, args, kwargs, request) - - -def report_internal_error(task, exc): - _type, _value, _tb = sys.exc_info() - try: - _value = task.backend.prepare_exception(exc, 'pickle') - exc_info = ExceptionInfo((_type, _value, _tb), internal=True) - warn(RuntimeWarning( - 'Exception raised outside body: {0!r}:\n{1}'.format( - exc, exc_info.traceback))) - return exc_info - finally: - del(_tb) - - -def setup_worker_optimizations(app): - global _tasks - global trace_task_ret - - # make sure custom Task.__call__ methods that calls super - # will not mess up the request/task stack. - _install_stack_protection() - - # all new threads start without a current app, so if an app is not - # passed on to the thread it will fall back to the "default app", - # which then could be the wrong app. So for the worker - # we set this to always return our app. This is a hack, - # and means that only a single app can be used for workers - # running in the same process. 
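# The outer trace_task above builds the tracer once and caches it on the
# task (task.__trace__); the same lazy-build-once pattern in isolation,
# with a counter proving the build step runs a single time.
def get_tracer(task, build):
    if getattr(task, '__trace__', None) is None:
        task.__trace__ = build(task)   # built once per task per process
    return task.__trace__

class TaskStub(object):
    __trace__ = None

built = []
def build(task):
    built.append(task)
    return lambda *args: 'traced'

tracer = get_tracer(TaskStub, build)
assert get_tracer(TaskStub, build) is tracer
assert len(built) == 1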
- app.set_current() - set_default_app(app) - - # evaluate all task classes by finalizing the app. - app.finalize() - - # set fast shortcut to task registry - _tasks = app._tasks - - trace_task_ret = _fast_trace_task - from celery.worker import job as job_module - job_module.trace_task_ret = _fast_trace_task - job_module.__optimize__() - - -def reset_worker_optimizations(): - global trace_task_ret - trace_task_ret = _trace_task_ret - try: - delattr(BaseTask, '_stackprotected') - except AttributeError: - pass - try: - BaseTask.__call__ = _patched.pop('BaseTask.__call__') - except KeyError: - pass - from celery.worker import job as job_module - job_module.trace_task_ret = _trace_task_ret - - -def _install_stack_protection(): - # Patches BaseTask.__call__ in the worker to handle the edge case - # where people override it and also call super. - # - # - The worker optimizes away BaseTask.__call__ and instead - # calls task.run directly. - # - so with the addition of current_task and the request stack - # BaseTask.__call__ now pushes to those stacks so that - # they work when tasks are called directly. - # - # The worker only optimizes away __call__ in the case - # where it has not been overridden, so the request/task stack - # will blow if a custom task class defines __call__ and also - # calls super(). - if not getattr(BaseTask, '_stackprotected', False): - _patched['BaseTask.__call__'] = orig = BaseTask.__call__ - - def __protected_call__(self, *args, **kwargs): - stack = self.request_stack - req = stack.top - if req and not req._protected and \ - len(stack) == 1 and not req.called_directly: - req._protected = 1 - return self.run(*args, **kwargs) - return orig(self, *args, **kwargs) - BaseTask.__call__ = __protected_call__ - BaseTask._stackprotected = True diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/utils.py b/thesisenv/lib/python3.6/site-packages/celery/app/utils.py deleted file mode 100644 index b76290b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/app/utils.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.utils - ~~~~~~~~~~~~~~~~ - - App utilities: Compat settings, bugreport tool, pickling apps. - -""" -from __future__ import absolute_import - -import os -import platform as _platform -import re - -from collections import Mapping -from types import ModuleType - -from kombu.utils.url import maybe_sanitize_url - -from celery.datastructures import ConfigurationView -from celery.five import items, string_t, values -from celery.platforms import pyimplementation -from celery.utils.text import pretty -from celery.utils.imports import import_from_cwd, symbol_by_name, qualname - -from .defaults import find - -__all__ = ['Settings', 'appstr', 'bugreport', - 'filter_hidden_settings', 'find_app'] - -#: Format used to generate bugreport information. -BUGREPORT_INFO = """ -software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} - billiard:{billiard_v} {driver_v} -platform -> system:{system} arch:{arch} imp:{py_i} -loader -> {loader} -settings -> transport:{transport} results:{results} - -{human_settings} -""" - -HIDDEN_SETTINGS = re.compile( - 'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE', - re.IGNORECASE, -) - - -def appstr(app): - """String used in __repr__ etc, to id app instances.""" - return '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) - - -class Settings(ConfigurationView): - """Celery settings object. - - .. seealso: - - :ref:`configuration` for a full list of configuration keys. 
- - """ - - @property - def CELERY_RESULT_BACKEND(self): - return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND') - - @property - def BROKER_TRANSPORT(self): - return self.first('BROKER_TRANSPORT', - 'BROKER_BACKEND', 'CARROT_BACKEND') - - @property - def BROKER_BACKEND(self): - """Deprecated compat alias to :attr:`BROKER_TRANSPORT`.""" - return self.BROKER_TRANSPORT - - @property - def BROKER_URL(self): - return (os.environ.get('CELERY_BROKER_URL') or - self.first('BROKER_URL', 'BROKER_HOST')) - - @property - def CELERY_TIMEZONE(self): - # this way we also support django's time zone. - return self.first('CELERY_TIMEZONE', 'TIME_ZONE') - - def without_defaults(self): - """Return the current configuration, but without defaults.""" - # the last stash is the default settings, so just skip that - return Settings({}, self._order[:-1]) - - def value_set_for(self, key): - return key in self.without_defaults() - - def find_option(self, name, namespace='celery'): - """Search for option by name. - - Will return ``(namespace, key, type)`` tuple, e.g.:: - - >>> from proj.celery import app - >>> app.conf.find_option('disable_rate_limits') - ('CELERY', 'DISABLE_RATE_LIMITS', - bool default->False>)) - - :param name: Name of option, cannot be partial. - :keyword namespace: Preferred namespace (``CELERY`` by default). - - """ - return find(name, namespace) - - def find_value_for_key(self, name, namespace='celery'): - """Shortcut to ``get_by_parts(*find_option(name)[:-1])``""" - return self.get_by_parts(*self.find_option(name, namespace)[:-1]) - - def get_by_parts(self, *parts): - """Return the current value for setting specified as a path. - - Example:: - - >>> from proj.celery import app - >>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS') - False - - """ - return self['_'.join(part for part in parts if part)] - - def table(self, with_defaults=False, censored=True): - filt = filter_hidden_settings if censored else lambda v: v - return filt(dict( - (k, v) for k, v in items( - self if with_defaults else self.without_defaults()) - if k.isupper() and not k.startswith('_') - )) - - def humanize(self, with_defaults=False, censored=True): - """Return a human readable string showing changes to the - configuration.""" - return '\n'.join( - '{0}: {1}'.format(key, pretty(value, width=50)) - for key, value in items(self.table(with_defaults, censored))) - - -class AppPickler(object): - """Old application pickler/unpickler (< 3.1).""" - - def __call__(self, cls, *args): - kwargs = self.build_kwargs(*args) - app = self.construct(cls, **kwargs) - self.prepare(app, **kwargs) - return app - - def prepare(self, app, **kwargs): - app.conf.update(kwargs['changes']) - - def build_kwargs(self, *args): - return self.build_standard_kwargs(*args) - - def build_standard_kwargs(self, main, changes, loader, backend, amqp, - events, log, control, accept_magic_kwargs, - config_source=None): - return dict(main=main, loader=loader, backend=backend, amqp=amqp, - changes=changes, events=events, log=log, control=control, - set_as_current=False, - accept_magic_kwargs=accept_magic_kwargs, - config_source=config_source) - - def construct(self, cls, **kwargs): - return cls(**kwargs) - - -def _unpickle_app(cls, pickler, *args): - """Rebuild app for versions 2.5+""" - return pickler()(cls, *args) - - -def _unpickle_app_v2(cls, kwargs): - """Rebuild app for versions 3.1+""" - kwargs['set_as_current'] = False - return cls(**kwargs) - - -def filter_hidden_settings(conf): - - def maybe_censor(key, value, mask='*' * 8): - if 
isinstance(value, Mapping): - return filter_hidden_settings(value) - if isinstance(key, string_t): - if HIDDEN_SETTINGS.search(key): - return mask - elif 'BROKER_URL' in key.upper(): - from kombu import Connection - return Connection(value).as_uri(mask=mask) - elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'): - return maybe_sanitize_url(value, mask=mask) - - return value - - return dict((k, maybe_censor(k, v)) for k, v in items(conf)) - - -def bugreport(app): - """Return a string containing information useful in bug reports.""" - import billiard - import celery - import kombu - - try: - conn = app.connection() - driver_v = '{0}:{1}'.format(conn.transport.driver_name, - conn.transport.driver_version()) - transport = conn.transport_cls - except Exception: - transport = driver_v = '' - - return BUGREPORT_INFO.format( - system=_platform.system(), - arch=', '.join(x for x in _platform.architecture() if x), - py_i=pyimplementation(), - celery_v=celery.VERSION_BANNER, - kombu_v=kombu.__version__, - billiard_v=billiard.__version__, - py_v=_platform.python_version(), - driver_v=driver_v, - transport=transport, - results=maybe_sanitize_url( - app.conf.CELERY_RESULT_BACKEND or 'disabled'), - human_settings=app.conf.humanize(), - loader=qualname(app.loader.__class__), - ) - - -def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd): - from .base import Celery - - try: - sym = symbol_by_name(app, imp=imp) - except AttributeError: - # last part was not an attribute, but a module - sym = imp(app) - if isinstance(sym, ModuleType) and ':' not in app: - try: - found = sym.app - if isinstance(found, ModuleType): - raise AttributeError() - except AttributeError: - try: - found = sym.celery - if isinstance(found, ModuleType): - raise AttributeError() - except AttributeError: - if getattr(sym, '__path__', None): - try: - return find_app( - '{0}.celery'.format(app), - symbol_by_name=symbol_by_name, imp=imp, - ) - except ImportError: - pass - for suspect in values(vars(sym)): - if isinstance(suspect, Celery): - return suspect - raise - else: - return found - else: - return found - return sym diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py b/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py deleted file mode 100644 index 46cef9b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.apps.beat - ~~~~~~~~~~~~~~~~ - - This module is the 'program-version' of :mod:`celery.beat`. - - It does everything necessary to run that module - as an actual application, like installing signal handlers - and so on. - -""" -from __future__ import absolute_import, unicode_literals - -import numbers -import socket -import sys - -from celery import VERSION_BANNER, platforms, beat -from celery.utils.imports import qualname -from celery.utils.log import LOG_LEVELS, get_logger -from celery.utils.timeutils import humanize_seconds - -__all__ = ['Beat'] - -STARTUP_INFO_FMT = """ -Configuration -> - . broker -> {conninfo} - . loader -> {loader} - . scheduler -> {scheduler} -{scheduler_info} - . logfile -> {logfile}@%{loglevel} - . 
maxinterval -> {hmax_interval} ({max_interval}s) -""".strip() - -logger = get_logger('celery.beat') - - -class Beat(object): - Service = beat.Service - app = None - - def __init__(self, max_interval=None, app=None, - socket_timeout=30, pidfile=None, no_color=None, - loglevel=None, logfile=None, schedule=None, - scheduler_cls=None, redirect_stdouts=None, - redirect_stdouts_level=None, **kwargs): - """Starts the beat task scheduler.""" - self.app = app = app or self.app - self.loglevel = self._getopt('log_level', loglevel) - self.logfile = self._getopt('log_file', logfile) - self.schedule = self._getopt('schedule_filename', schedule) - self.scheduler_cls = self._getopt('scheduler', scheduler_cls) - self.redirect_stdouts = self._getopt( - 'redirect_stdouts', redirect_stdouts, - ) - self.redirect_stdouts_level = self._getopt( - 'redirect_stdouts_level', redirect_stdouts_level, - ) - - self.max_interval = max_interval - self.socket_timeout = socket_timeout - self.no_color = no_color - self.colored = app.log.colored( - self.logfile, - enabled=not no_color if no_color is not None else no_color, - ) - self.pidfile = pidfile - - if not isinstance(self.loglevel, numbers.Integral): - self.loglevel = LOG_LEVELS[self.loglevel.upper()] - - def _getopt(self, key, value): - if value is not None: - return value - return self.app.conf.find_value_for_key(key, namespace='celerybeat') - - def run(self): - print(str(self.colored.cyan( - 'celery beat v{0} is starting.'.format(VERSION_BANNER)))) - self.init_loader() - self.set_process_title() - self.start_scheduler() - - def setup_logging(self, colorize=None): - if colorize is None and self.no_color is not None: - colorize = not self.no_color - self.app.log.setup(self.loglevel, self.logfile, - self.redirect_stdouts, self.redirect_stdouts_level, - colorize=colorize) - - def start_scheduler(self): - c = self.colored - if self.pidfile: - platforms.create_pidlock(self.pidfile) - beat = self.Service(app=self.app, - max_interval=self.max_interval, - scheduler_cls=self.scheduler_cls, - schedule_filename=self.schedule) - - print(str(c.blue('__ ', c.magenta('-'), - c.blue(' ... __ '), c.magenta('-'), - c.blue(' _\n'), - c.reset(self.startup_info(beat))))) - self.setup_logging() - if self.socket_timeout: - logger.debug('Setting default socket timeout to %r', - self.socket_timeout) - socket.setdefaulttimeout(self.socket_timeout) - try: - self.install_sync_handler(beat) - beat.start() - except Exception as exc: - logger.critical('beat raised exception %s: %r', - exc.__class__, exc, - exc_info=True) - - def init_loader(self): - # Run the worker init handler. - # (Usually imports task modules and such.) 
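# The _getopt pattern above in isolation: an explicit constructor/CLI
# value wins, otherwise the value comes from configuration under the
# 'celerybeat' namespace. conf is a stand-in for app.conf here.
conf = {'CELERYBEAT_LOG_LEVEL': 'INFO'}

def getopt(key, value, namespace='CELERYBEAT'):
    if value is not None:
        return value
    return conf['{0}_{1}'.format(namespace, key.upper())]

assert getopt('log_level', 'DEBUG') == 'DEBUG'  # explicit value wins
assert getopt('log_level', None) == 'INFO'      # falls back to the config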
- self.app.loader.init_worker() - self.app.finalize() - - def startup_info(self, beat): - scheduler = beat.get_scheduler(lazy=True) - return STARTUP_INFO_FMT.format( - conninfo=self.app.connection().as_uri(), - logfile=self.logfile or '[stderr]', - loglevel=LOG_LEVELS[self.loglevel], - loader=qualname(self.app.loader), - scheduler=qualname(scheduler), - scheduler_info=scheduler.info, - hmax_interval=humanize_seconds(beat.max_interval), - max_interval=beat.max_interval, - ) - - def set_process_title(self): - arg_start = 'manage' in sys.argv[0] and 2 or 1 - platforms.set_process_title( - 'celery beat', info=' '.join(sys.argv[arg_start:]), - ) - - def install_sync_handler(self, beat): - """Install a `SIGTERM` + `SIGINT` handler that saves - the beat schedule.""" - - def _sync(signum, frame): - beat.sync() - raise SystemExit() - - platforms.signals.update(SIGTERM=_sync, SIGINT=_sync) diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py b/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py deleted file mode 100644 index 637a082..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.apps.worker - ~~~~~~~~~~~~~~~~~~ - - This module is the 'program-version' of :mod:`celery.worker`. - - It does everything necessary to run that module - as an actual application, like installing signal handlers, - platform tweaks, and so on. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import logging -import os -import platform as _platform -import sys -import warnings - -from functools import partial - -from billiard import current_process -from kombu.utils.encoding import safe_str - -from celery import VERSION_BANNER, platforms, signals -from celery.app import trace -from celery.exceptions import ( - CDeprecationWarning, WorkerShutdown, WorkerTerminate, -) -from celery.five import string, string_t -from celery.loaders.app import AppLoader -from celery.platforms import check_privileges -from celery.utils import cry, isatty -from celery.utils.imports import qualname -from celery.utils.log import get_logger, in_sighandler, set_in_sighandler -from celery.utils.text import pluralize -from celery.worker import WorkController - -__all__ = ['Worker'] - -logger = get_logger(__name__) -is_jython = sys.platform.startswith('java') -is_pypy = hasattr(sys, 'pypy_version_info') - -W_PICKLE_DEPRECATED = """ -Starting from version 3.2 Celery will refuse to accept pickle by default. - -The pickle serializer is a security concern as it may give attackers -the ability to execute any command. It's important to secure -your broker from unauthorized access when using pickle, so we think -that enabling pickle should require a deliberate action and not be -the default choice. - -If you depend on pickle then you should set a setting to disable this -warning and to be sure that everything will continue working -when you upgrade to Celery 3.2:: - - CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] - -You must only enable the serializers that you will actually use. 
- -""" - - -def active_thread_count(): - from threading import enumerate - return sum(1 for t in enumerate() - if not t.name.startswith('Dummy-')) - - -def safe_say(msg): - print('\n{0}'.format(msg), file=sys.__stderr__) - -ARTLINES = [ - ' --------------', - '---- **** -----', - '--- * *** * --', - '-- * - **** ---', - '- ** ----------', - '- ** ----------', - '- ** ----------', - '- ** ----------', - '- *** --- * ---', - '-- ******* ----', - '--- ***** -----', - ' --------------', -] - -BANNER = """\ -{hostname} v{version} - -{platform} - -[config] -.> app: {app} -.> transport: {conninfo} -.> results: {results} -.> concurrency: {concurrency} - -[queues] -{queues} -""" - -EXTRA_INFO_FMT = """ -[tasks] -{tasks} -""" - - -class Worker(WorkController): - - def on_before_init(self, **kwargs): - trace.setup_worker_optimizations(self.app) - - # this signal can be used to set up configuration for - # workers by name. - signals.celeryd_init.send( - sender=self.hostname, instance=self, - conf=self.app.conf, options=kwargs, - ) - check_privileges(self.app.conf.CELERY_ACCEPT_CONTENT) - - def on_after_init(self, purge=False, no_color=None, - redirect_stdouts=None, redirect_stdouts_level=None, - **kwargs): - self.redirect_stdouts = self._getopt( - 'redirect_stdouts', redirect_stdouts, - ) - self.redirect_stdouts_level = self._getopt( - 'redirect_stdouts_level', redirect_stdouts_level, - ) - super(Worker, self).setup_defaults(**kwargs) - self.purge = purge - self.no_color = no_color - self._isatty = isatty(sys.stdout) - self.colored = self.app.log.colored( - self.logfile, - enabled=not no_color if no_color is not None else no_color - ) - - def on_init_blueprint(self): - self._custom_logging = self.setup_logging() - # apply task execution optimizations - # -- This will finalize the app! - trace.setup_worker_optimizations(self.app) - - def on_start(self): - if not self._custom_logging and self.redirect_stdouts: - self.app.log.redirect_stdouts(self.redirect_stdouts_level) - - WorkController.on_start(self) - - # this signal can be used to e.g. change queues after - # the -Q option has been applied. - signals.celeryd_after_setup.send( - sender=self.hostname, instance=self, conf=self.app.conf, - ) - - if not self.app.conf.value_set_for('CELERY_ACCEPT_CONTENT'): - warnings.warn(CDeprecationWarning(W_PICKLE_DEPRECATED)) - - if self.purge: - self.purge_messages() - - # Dump configuration to screen so we have some basic information - # for when users sends bug reports. - print(safe_str(''.join([ - string(self.colored.cyan(' \n', self.startup_info())), - string(self.colored.reset(self.extra_info() or '')), - ])), file=sys.__stdout__) - self.set_process_status('-active-') - self.install_platform_tweaks(self) - - def on_consumer_ready(self, consumer): - signals.worker_ready.send(sender=consumer) - print('{0} ready.'.format(safe_str(self.hostname), )) - - def setup_logging(self, colorize=None): - if colorize is None and self.no_color is not None: - colorize = not self.no_color - return self.app.log.setup( - self.loglevel, self.logfile, - redirect_stdouts=False, colorize=colorize, hostname=self.hostname, - ) - - def purge_messages(self): - count = self.app.control.purge() - if count: - print('purge: Erased {0} {1} from the queue.\n'.format( - count, pluralize(count, 'message'))) - - def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): - return sep.join( - ' . 
{0}'.format(task) for task in sorted(self.app.tasks) - if (not task.startswith(int_) if not include_builtins else task) - ) - - def extra_info(self): - if self.loglevel <= logging.INFO: - include_builtins = self.loglevel <= logging.DEBUG - tasklist = self.tasklist(include_builtins=include_builtins) - return EXTRA_INFO_FMT.format(tasks=tasklist) - - def startup_info(self): - app = self.app - concurrency = string(self.concurrency) - appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) - if not isinstance(app.loader, AppLoader): - loader = qualname(app.loader) - if loader.startswith('celery.loaders'): - loader = loader[14:] - appr += ' ({0})'.format(loader) - if self.autoscale: - max, min = self.autoscale - concurrency = '{{min={0}, max={1}}}'.format(min, max) - pool = self.pool_cls - if not isinstance(pool, string_t): - pool = pool.__module__ - concurrency += ' ({0})'.format(pool.split('.')[-1]) - events = 'ON' - if not self.send_events: - events = 'OFF (enable -E to monitor this worker)' - - banner = BANNER.format( - app=appr, - hostname=safe_str(self.hostname), - version=VERSION_BANNER, - conninfo=self.app.connection().as_uri(), - results=self.app.backend.as_uri(), - concurrency=concurrency, - platform=safe_str(_platform.platform()), - events=events, - queues=app.amqp.queues.format(indent=0, indent_first=False), - ).splitlines() - - # integrate the ASCII art. - for i, x in enumerate(banner): - try: - banner[i] = ' '.join([ARTLINES[i], banner[i]]) - except IndexError: - banner[i] = ' ' * 16 + banner[i] - return '\n'.join(banner) + '\n' - - def install_platform_tweaks(self, worker): - """Install platform specific tweaks and workarounds.""" - if self.app.IS_OSX: - self.osx_proxy_detection_workaround() - - # Install signal handler so SIGHUP restarts the worker. - if not self._isatty: - # only install HUP handler if detached from terminal, - # so closing the terminal window doesn't restart the worker - # into the background. - if self.app.IS_OSX: - # OS X can't exec from a process using threads. 
- # See http://github.com/celery/celery/issues#issue/152 - install_HUP_not_supported_handler(worker) - else: - install_worker_restart_handler(worker) - install_worker_term_handler(worker) - install_worker_term_hard_handler(worker) - install_worker_int_handler(worker) - install_cry_handler() - install_rdb_handler() - - def osx_proxy_detection_workaround(self): - """See http://github.com/celery/celery/issues#issue/161""" - os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd') - - def set_process_status(self, info): - return platforms.set_mp_process_title( - 'celeryd', - info='{0} ({1})'.format(info, platforms.strargv(sys.argv)), - hostname=self.hostname, - ) - - -def _shutdown_handler(worker, sig='TERM', how='Warm', - exc=WorkerShutdown, callback=None): - - def _handle_request(*args): - with in_sighandler(): - from celery.worker import state - if current_process()._name == 'MainProcess': - if callback: - callback(worker) - safe_say('worker: {0} shutdown (MainProcess)'.format(how)) - if active_thread_count() > 1: - setattr(state, {'Warm': 'should_stop', - 'Cold': 'should_terminate'}[how], True) - else: - raise exc() - _handle_request.__name__ = str('worker_{0}'.format(how)) - platforms.signals[sig] = _handle_request -install_worker_term_handler = partial( - _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, -) -if not is_jython: # pragma: no cover - install_worker_term_hard_handler = partial( - _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, - ) -else: # pragma: no cover - install_worker_term_handler = \ - install_worker_term_hard_handler = lambda *a, **kw: None - - -def on_SIGINT(worker): - safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') - install_worker_term_hard_handler(worker, sig='SIGINT') -if not is_jython: # pragma: no cover - install_worker_int_handler = partial( - _shutdown_handler, sig='SIGINT', callback=on_SIGINT - ) -else: # pragma: no cover - def install_worker_int_handler(*a, **kw): - pass - - -def _reload_current_worker(): - platforms.close_open_fds([ - sys.__stdin__, sys.__stdout__, sys.__stderr__, - ]) - os.execv(sys.executable, [sys.executable] + sys.argv) - - -def install_worker_restart_handler(worker, sig='SIGHUP'): - - def restart_worker_sig_handler(*args): - """Signal handler restarting the current python program.""" - set_in_sighandler(True) - safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv))) - import atexit - atexit.register(_reload_current_worker) - from celery.worker import state - state.should_stop = True - platforms.signals[sig] = restart_worker_sig_handler - - -def install_cry_handler(sig='SIGUSR1'): - # Jython/PyPy does not have sys._current_frames - if is_jython or is_pypy: # pragma: no cover - return - - def cry_handler(*args): - """Signal handler logging the stacktrace of all active threads.""" - with in_sighandler(): - safe_say(cry()) - platforms.signals[sig] = cry_handler - - -def install_rdb_handler(envvar='CELERY_RDBSIG', - sig='SIGUSR2'): # pragma: no cover - - def rdb_handler(*args): - """Signal handler setting a rdb breakpoint at the current frame.""" - with in_sighandler(): - from celery.contrib.rdb import set_trace, _frame - # gevent does not pass standard signal handler args - frame = args[1] if args else _frame().f_back - set_trace(frame) - if os.environ.get(envvar): - platforms.signals[sig] = rdb_handler - - -def install_HUP_not_supported_handler(worker, sig='SIGHUP'): - - def warn_on_HUP_handler(signum, frame): - with in_sighandler(): - safe_say('{sig} not 
supported: Restarting with {sig} is ' - 'unstable on this platform!'.format(sig=sig)) - platforms.signals[sig] = warn_on_HUP_handler diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py deleted file mode 100644 index 44ee3b7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends - ~~~~~~~~~~~~~~~ - - Backend abstract factory (...did I just say that?) and alias definitions. - -""" -from __future__ import absolute_import - -import sys -import types - -from celery.exceptions import ImproperlyConfigured -from celery.local import Proxy -from celery._state import current_app -from celery.five import reraise -from celery.utils.imports import symbol_by_name - -__all__ = ['get_backend_cls', 'get_backend_by_url'] - -UNKNOWN_BACKEND = """\ -Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})\ -""" - -BACKEND_ALIASES = { - 'amqp': 'celery.backends.amqp:AMQPBackend', - 'rpc': 'celery.backends.rpc.RPCBackend', - 'cache': 'celery.backends.cache:CacheBackend', - 'redis': 'celery.backends.redis:RedisBackend', - 'mongodb': 'celery.backends.mongodb:MongoBackend', - 'db': 'celery.backends.database:DatabaseBackend', - 'database': 'celery.backends.database:DatabaseBackend', - 'cassandra': 'celery.backends.cassandra:CassandraBackend', - 'couchbase': 'celery.backends.couchbase:CouchBaseBackend', - 'disabled': 'celery.backends.base:DisabledBackend', -} - -#: deprecated alias to ``current_app.backend``. -default_backend = Proxy(lambda: current_app.backend) - - -def get_backend_cls(backend=None, loader=None): - """Get backend class by name/alias""" - backend = backend or 'disabled' - loader = loader or current_app.loader - aliases = dict(BACKEND_ALIASES, **loader.override_backends) - try: - cls = symbol_by_name(backend, aliases) - except ValueError as exc: - reraise(ImproperlyConfigured, ImproperlyConfigured( - UNKNOWN_BACKEND.format(backend, exc)), sys.exc_info()[2]) - if isinstance(cls, types.ModuleType): - raise ImproperlyConfigured(UNKNOWN_BACKEND.format( - backend, 'is a Python module, not a backend class.')) - return cls - - -def get_backend_by_url(backend=None, loader=None): - url = None - if backend and '://' in backend: - url = backend - scheme, _, _ = url.partition('://') - if '+' in scheme: - backend, url = url.split('+', 1) - else: - backend = scheme - return get_backend_cls(backend, loader), url diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py deleted file mode 100644 index 6e7f778..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py +++ /dev/null @@ -1,317 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.amqp - ~~~~~~~~~~~~~~~~~~~~ - - The AMQP result backend. - - This backend publishes results as messages. 
- -""" -from __future__ import absolute_import - -import socket - -from collections import deque -from operator import itemgetter - -from kombu import Exchange, Queue, Producer, Consumer - -from celery import states -from celery.exceptions import TimeoutError -from celery.five import range, monotonic -from celery.utils.functional import dictfilter -from celery.utils.log import get_logger -from celery.utils.timeutils import maybe_s_to_ms - -from .base import BaseBackend - -__all__ = ['BacklogLimitExceeded', 'AMQPBackend'] - -logger = get_logger(__name__) - - -class BacklogLimitExceeded(Exception): - """Too much state history to fast-forward.""" - - -def repair_uuid(s): - # Historically the dashes in UUIDS are removed from AMQ entity names, - # but there is no known reason to. Hopefully we'll be able to fix - # this in v4.0. - return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:]) - - -class NoCacheQueue(Queue): - can_cache_declaration = False - - -class AMQPBackend(BaseBackend): - """Publishes results by sending messages.""" - Exchange = Exchange - Queue = NoCacheQueue - Consumer = Consumer - Producer = Producer - - BacklogLimitExceeded = BacklogLimitExceeded - - persistent = True - supports_autoexpire = True - supports_native_join = True - - retry_policy = { - 'max_retries': 20, - 'interval_start': 0, - 'interval_step': 1, - 'interval_max': 1, - } - - def __init__(self, app, connection=None, exchange=None, exchange_type=None, - persistent=None, serializer=None, auto_delete=True, **kwargs): - super(AMQPBackend, self).__init__(app, **kwargs) - conf = self.app.conf - self._connection = connection - self.persistent = self.prepare_persistent(persistent) - self.delivery_mode = 2 if self.persistent else 1 - exchange = exchange or conf.CELERY_RESULT_EXCHANGE - exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE - self.exchange = self._create_exchange( - exchange, exchange_type, self.delivery_mode, - ) - self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER - self.auto_delete = auto_delete - - self.expires = None - if 'expires' not in kwargs or kwargs['expires'] is not None: - self.expires = self.prepare_expires(kwargs.get('expires')) - self.queue_arguments = dictfilter({ - 'x-expires': maybe_s_to_ms(self.expires), - }) - - def _create_exchange(self, name, type='direct', delivery_mode=2): - return self.Exchange(name=name, - type=type, - delivery_mode=delivery_mode, - durable=self.persistent, - auto_delete=False) - - def _create_binding(self, task_id): - name = self.rkey(task_id) - return self.Queue(name=name, - exchange=self.exchange, - routing_key=name, - durable=self.persistent, - auto_delete=self.auto_delete, - queue_arguments=self.queue_arguments) - - def revive(self, channel): - pass - - def rkey(self, task_id): - return task_id.replace('-', '') - - def destination_for(self, task_id, request): - if request: - return self.rkey(task_id), request.correlation_id or task_id - return self.rkey(task_id), task_id - - def store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Send task return value and status.""" - routing_key, correlation_id = self.destination_for(task_id, request) - if not routing_key: - return - with self.app.amqp.producer_pool.acquire(block=True) as producer: - producer.publish( - {'task_id': task_id, 'status': status, - 'result': self.encode_result(result, status), - 'traceback': traceback, - 'children': self.current_task_children(request)}, - exchange=self.exchange, - routing_key=routing_key, - 
correlation_id=correlation_id, - serializer=self.serializer, - retry=True, retry_policy=self.retry_policy, - declare=self.on_reply_declare(task_id), - delivery_mode=self.delivery_mode, - ) - return result - - def on_reply_declare(self, task_id): - return [self._create_binding(task_id)] - - def wait_for(self, task_id, timeout=None, cache=True, - no_ack=True, on_interval=None, - READY_STATES=states.READY_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES, - **kwargs): - cached_meta = self._cache.get(task_id) - if cache and cached_meta and \ - cached_meta['status'] in READY_STATES: - return cached_meta - else: - try: - return self.consume(task_id, timeout=timeout, no_ack=no_ack, - on_interval=on_interval) - except socket.timeout: - raise TimeoutError('The operation timed out.') - - def get_task_meta(self, task_id, backlog_limit=1000): - # Polling and using basic_get - with self.app.pool.acquire_channel(block=True) as (_, channel): - binding = self._create_binding(task_id)(channel) - binding.declare() - - prev = latest = acc = None - for i in range(backlog_limit): # spool ffwd - acc = binding.get( - accept=self.accept, no_ack=False, - ) - if not acc: # no more messages - break - if acc.payload['task_id'] == task_id: - prev, latest = latest, acc - if prev: - # backends are not expected to keep history, - # so we delete everything except the most recent state. - prev.ack() - prev = None - else: - raise self.BacklogLimitExceeded(task_id) - - if latest: - payload = self._cache[task_id] = \ - self.meta_from_decoded(latest.payload) - latest.requeue() - return payload - else: - # no new state, use previous - try: - return self._cache[task_id] - except KeyError: - # result probably pending. - return {'status': states.PENDING, 'result': None} - poll = get_task_meta # XXX compat - - def drain_events(self, connection, consumer, - timeout=None, on_interval=None, now=monotonic, wait=None): - wait = wait or connection.drain_events - results = {} - - def callback(meta, message): - if meta['status'] in states.READY_STATES: - results[meta['task_id']] = self.meta_from_decoded(meta) - - consumer.callbacks[:] = [callback] - time_start = now() - - while 1: - # Total time spent may exceed a single call to wait() - if timeout and now() - time_start >= timeout: - raise socket.timeout() - try: - wait(timeout=1) - except socket.timeout: - pass - if on_interval: - on_interval() - if results: # got event on the wanted channel. 
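# (Note, not in the original: the loop above enforces the *total* timeout
#  by hand, because wait(timeout=1) only bounds a single drain_events()
#  pass; per-pass socket.timeout is swallowed so polling and the
#  on_interval callback keep running until the overall deadline, at which
#  point socket.timeout is raised for the caller.)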
- break - self._cache.update(results) - return results - - def consume(self, task_id, timeout=None, no_ack=True, on_interval=None): - wait = self.drain_events - with self.app.pool.acquire_channel(block=True) as (conn, channel): - binding = self._create_binding(task_id) - with self.Consumer(channel, binding, - no_ack=no_ack, accept=self.accept) as consumer: - while 1: - try: - return wait( - conn, consumer, timeout, on_interval)[task_id] - except KeyError: - continue - - def _many_bindings(self, ids): - return [self._create_binding(task_id) for task_id in ids] - - def get_many(self, task_ids, timeout=None, no_ack=True, - now=monotonic, getfields=itemgetter('status', 'task_id'), - READY_STATES=states.READY_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): - with self.app.pool.acquire_channel(block=True) as (conn, channel): - ids = set(task_ids) - cached_ids = set() - mark_cached = cached_ids.add - for task_id in ids: - try: - cached = self._cache[task_id] - except KeyError: - pass - else: - if cached['status'] in READY_STATES: - yield task_id, cached - mark_cached(task_id) - ids.difference_update(cached_ids) - results = deque() - push_result = results.append - push_cache = self._cache.__setitem__ - decode_result = self.meta_from_decoded - - def on_message(message): - body = decode_result(message.decode()) - state, uid = getfields(body) - if state in READY_STATES: - push_result(body) \ - if uid in task_ids else push_cache(uid, body) - - bindings = self._many_bindings(task_ids) - with self.Consumer(channel, bindings, on_message=on_message, - accept=self.accept, no_ack=no_ack): - wait = conn.drain_events - popleft = results.popleft - while ids: - wait(timeout=timeout) - while results: - state = popleft() - task_id = state['task_id'] - ids.discard(task_id) - push_cache(task_id, state) - yield task_id, state - - def reload_task_result(self, task_id): - raise NotImplementedError( - 'reload_task_result is not supported by this backend.') - - def reload_group_result(self, task_id): - """Reload group result, even if it has been previously fetched.""" - raise NotImplementedError( - 'reload_group_result is not supported by this backend.') - - def save_group(self, group_id, result): - raise NotImplementedError( - 'save_group is not supported by this backend.') - - def restore_group(self, group_id, cache=True): - raise NotImplementedError( - 'restore_group is not supported by this backend.') - - def delete_group(self, group_id): - raise NotImplementedError( - 'delete_group is not supported by this backend.') - - def as_uri(self, include_password=True): - return 'amqp://' - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - connection=self._connection, - exchange=self.exchange.name, - exchange_type=self.exchange.type, - persistent=self.persistent, - serializer=self.serializer, - auto_delete=self.auto_delete, - expires=self.expires, - ) - return super(AMQPBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/base.py b/thesisenv/lib/python3.6/site-packages/celery/backends/base.py deleted file mode 100644 index 03b6909..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/base.py +++ /dev/null @@ -1,623 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.base - ~~~~~~~~~~~~~~~~~~~~ - - Result backend base classes. - - - :class:`BaseBackend` defines the interface. - - - :class:`KeyValueStoreBackend` is a common base class - using K/V semantics like _get and _put. 
- -""" -from __future__ import absolute_import - -import time -import sys - -from datetime import timedelta - -from billiard.einfo import ExceptionInfo -from kombu.serialization import ( - dumps, loads, prepare_accept_content, - registry as serializer_registry, -) -from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8 -from kombu.utils.url import maybe_sanitize_url - -from celery import states -from celery import current_app, maybe_signature -from celery.app import current_task -from celery.exceptions import ChordError, TimeoutError, TaskRevokedError -from celery.five import items -from celery.result import ( - GroupResult, ResultBase, allow_join_result, result_from_tuple, -) -from celery.utils import timeutils -from celery.utils.functional import LRUCache -from celery.utils.log import get_logger -from celery.utils.serialization import ( - get_pickled_exception, - get_pickleable_exception, - create_exception_cls, -) - -__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend'] - -EXCEPTION_ABLE_CODECS = frozenset(['pickle']) -PY3 = sys.version_info >= (3, 0) - -logger = get_logger(__name__) - - -def unpickle_backend(cls, args, kwargs): - """Return an unpickled backend.""" - return cls(*args, app=current_app._get_current_object(), **kwargs) - - -class _nulldict(dict): - - def ignore(self, *a, **kw): - pass - __setitem__ = update = setdefault = ignore - - -class BaseBackend(object): - READY_STATES = states.READY_STATES - UNREADY_STATES = states.UNREADY_STATES - EXCEPTION_STATES = states.EXCEPTION_STATES - - TimeoutError = TimeoutError - - #: Time to sleep between polling each individual item - #: in `ResultSet.iterate`. as opposed to the `interval` - #: argument which is for each pass. - subpolling_interval = None - - #: If true the backend must implement :meth:`get_many`. - supports_native_join = False - - #: If true the backend must automatically expire results. - #: The daily backend_cleanup periodic task will not be triggered - #: in this case. - supports_autoexpire = False - - #: Set to true if the backend is peristent by default. 
- persistent = True - - retry_policy = { - 'max_retries': 20, - 'interval_start': 0, - 'interval_step': 1, - 'interval_max': 1, - } - - def __init__(self, app, - serializer=None, max_cached_results=None, accept=None, - url=None, **kwargs): - self.app = app - conf = self.app.conf - self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER - (self.content_type, - self.content_encoding, - self.encoder) = serializer_registry._encoders[self.serializer] - cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS - self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) - self.accept = prepare_accept_content( - conf.CELERY_ACCEPT_CONTENT if accept is None else accept, - ) - self.url = url - - def as_uri(self, include_password=False): - """Return the backend as an URI, sanitizing the password or not""" - # when using maybe_sanitize_url(), "/" is added - # we're stripping it for consistency - if include_password: - return self.url - url = maybe_sanitize_url(self.url or '') - return url[:-1] if url.endswith(':///') else url - - def mark_as_started(self, task_id, **meta): - """Mark a task as started""" - return self.store_result(task_id, meta, status=states.STARTED) - - def mark_as_done(self, task_id, result, request=None): - """Mark task as successfully executed.""" - return self.store_result(task_id, result, - status=states.SUCCESS, request=request) - - def mark_as_failure(self, task_id, exc, traceback=None, request=None): - """Mark task as executed with failure. Stores the exception.""" - return self.store_result(task_id, exc, status=states.FAILURE, - traceback=traceback, request=request) - - def chord_error_from_stack(self, callback, exc=None): - from celery import group - app = self.app - backend = app._tasks[callback.task].backend - try: - group( - [app.signature(errback) - for errback in callback.options.get('link_error') or []], - app=app, - ).apply_async((callback.id, )) - except Exception as eb_exc: - return backend.fail_from_current_stack(callback.id, exc=eb_exc) - else: - return backend.fail_from_current_stack(callback.id, exc=exc) - - def fail_from_current_stack(self, task_id, exc=None): - type_, real_exc, tb = sys.exc_info() - try: - exc = real_exc if exc is None else exc - ei = ExceptionInfo((type_, exc, tb)) - self.mark_as_failure(task_id, exc, ei.traceback) - return ei - finally: - del(tb) - - def mark_as_retry(self, task_id, exc, traceback=None, request=None): - """Mark task as being retries. 
Stores the current - exception (if any).""" - return self.store_result(task_id, exc, status=states.RETRY, - traceback=traceback, request=request) - - def mark_as_revoked(self, task_id, reason='', request=None): - return self.store_result(task_id, TaskRevokedError(reason), - status=states.REVOKED, traceback=None, - request=request) - - def prepare_exception(self, exc, serializer=None): - """Prepare exception for serialization.""" - serializer = self.serializer if serializer is None else serializer - if serializer in EXCEPTION_ABLE_CODECS: - return get_pickleable_exception(exc) - return {'exc_type': type(exc).__name__, 'exc_message': str(exc)} - - def exception_to_python(self, exc): - """Convert serialized exception to Python exception.""" - if exc: - if not isinstance(exc, BaseException): - exc = create_exception_cls( - from_utf8(exc['exc_type']), __name__)(exc['exc_message']) - if self.serializer in EXCEPTION_ABLE_CODECS: - exc = get_pickled_exception(exc) - return exc - - def prepare_value(self, result): - """Prepare value for storage.""" - if self.serializer != 'pickle' and isinstance(result, ResultBase): - return result.as_tuple() - return result - - def encode(self, data): - _, _, payload = dumps(data, serializer=self.serializer) - return payload - - def meta_from_decoded(self, meta): - if meta['status'] in self.EXCEPTION_STATES: - meta['result'] = self.exception_to_python(meta['result']) - return meta - - def decode_result(self, payload): - return self.meta_from_decoded(self.decode(payload)) - - def decode(self, payload): - payload = PY3 and payload or str(payload) - return loads(payload, - content_type=self.content_type, - content_encoding=self.content_encoding, - accept=self.accept) - - def wait_for(self, task_id, - timeout=None, interval=0.5, no_ack=True, on_interval=None): - """Wait for task and return its result. - - If the task raises an exception, this exception - will be re-raised by :func:`wait_for`. - - If `timeout` is not :const:`None`, this raises the - :class:`celery.exceptions.TimeoutError` exception if the operation - takes longer than `timeout` seconds. - - """ - - time_elapsed = 0.0 - - while 1: - meta = self.get_task_meta(task_id) - if meta['status'] in states.READY_STATES: - return meta - if on_interval: - on_interval() - # avoid hammering the CPU checking status. 
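# (Usage sketch — task id and result values are hypothetical, and
#  AsyncResult.get() is the usual public entry point for this polling
#  loop. With a configured result backend:
#
#      >>> meta = app.backend.wait_for(task_id, timeout=10, interval=0.5)
#      >>> meta['status'], meta['result']
#      ('SUCCESS', 42)
#
#  If the deadline passes first, TimeoutError is raised as documented.)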
- time.sleep(interval) - time_elapsed += interval - if timeout and time_elapsed >= timeout: - raise TimeoutError('The operation timed out.') - - def prepare_expires(self, value, type=None): - if value is None: - value = self.app.conf.CELERY_TASK_RESULT_EXPIRES - if isinstance(value, timedelta): - value = timeutils.timedelta_seconds(value) - if value is not None and type: - return type(value) - return value - - def prepare_persistent(self, enabled=None): - if enabled is not None: - return enabled - p = self.app.conf.CELERY_RESULT_PERSISTENT - return self.persistent if p is None else p - - def encode_result(self, result, status): - if isinstance(result, ExceptionInfo): - result = result.exception - if status in self.EXCEPTION_STATES and isinstance(result, Exception): - return self.prepare_exception(result) - else: - return self.prepare_value(result) - - def is_cached(self, task_id): - return task_id in self._cache - - def store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Update task state and result.""" - result = self.encode_result(result, status) - self._store_result(task_id, result, status, traceback, - request=request, **kwargs) - return result - - def forget(self, task_id): - self._cache.pop(task_id, None) - self._forget(task_id) - - def _forget(self, task_id): - raise NotImplementedError('backend does not implement forget.') - - def get_status(self, task_id): - """Get the status of a task.""" - return self.get_task_meta(task_id)['status'] - - def get_traceback(self, task_id): - """Get the traceback for a failed task.""" - return self.get_task_meta(task_id).get('traceback') - - def get_result(self, task_id): - """Get the result of a task.""" - return self.get_task_meta(task_id).get('result') - - def get_children(self, task_id): - """Get the list of subtasks sent by a task.""" - try: - return self.get_task_meta(task_id)['children'] - except KeyError: - pass - - def get_task_meta(self, task_id, cache=True): - if cache: - try: - return self._cache[task_id] - except KeyError: - pass - - meta = self._get_task_meta_for(task_id) - if cache and meta.get('status') == states.SUCCESS: - self._cache[task_id] = meta - return meta - - def reload_task_result(self, task_id): - """Reload task result, even if it has been previously fetched.""" - self._cache[task_id] = self.get_task_meta(task_id, cache=False) - - def reload_group_result(self, group_id): - """Reload group result, even if it has been previously fetched.""" - self._cache[group_id] = self.get_group_meta(group_id, cache=False) - - def get_group_meta(self, group_id, cache=True): - if cache: - try: - return self._cache[group_id] - except KeyError: - pass - - meta = self._restore_group(group_id) - if cache and meta is not None: - self._cache[group_id] = meta - return meta - - def restore_group(self, group_id, cache=True): - """Get the result for a group.""" - meta = self.get_group_meta(group_id, cache=cache) - if meta: - return meta['result'] - - def save_group(self, group_id, result): - """Store the result of an executed group.""" - return self._save_group(group_id, result) - - def delete_group(self, group_id): - self._cache.pop(group_id, None) - return self._delete_group(group_id) - - def cleanup(self): - """Backend cleanup. 
Is run by - :class:`celery.task.DeleteExpiredTaskMetaTask`.""" - pass - - def process_cleanup(self): - """Cleanup actions to do at the end of a task worker process.""" - pass - - def on_task_call(self, producer, task_id): - return {} - - def on_chord_part_return(self, task, state, result, propagate=False): - pass - - def fallback_chord_unlock(self, group_id, body, result=None, - countdown=1, **kwargs): - kwargs['result'] = [r.as_tuple() for r in result] - self.app.tasks['celery.chord_unlock'].apply_async( - (group_id, body, ), kwargs, countdown=countdown, - ) - - def apply_chord(self, header, partial_args, group_id, body, **options): - result = header(*partial_args, task_id=group_id) - self.fallback_chord_unlock(group_id, body, **options) - return result - - def current_task_children(self, request=None): - request = request or getattr(current_task(), 'request', None) - if request: - return [r.as_tuple() for r in getattr(request, 'children', [])] - - def __reduce__(self, args=(), kwargs={}): - return (unpickle_backend, (self.__class__, args, kwargs)) -BaseDictBackend = BaseBackend # XXX compat - - -class KeyValueStoreBackend(BaseBackend): - key_t = ensure_bytes - task_keyprefix = 'celery-task-meta-' - group_keyprefix = 'celery-taskset-meta-' - chord_keyprefix = 'chord-unlock-' - implements_incr = False - - def __init__(self, *args, **kwargs): - if hasattr(self.key_t, '__func__'): - self.key_t = self.key_t.__func__ # remove binding - self._encode_prefixes() - super(KeyValueStoreBackend, self).__init__(*args, **kwargs) - if self.implements_incr: - self.apply_chord = self._apply_chord_incr - - def _encode_prefixes(self): - self.task_keyprefix = self.key_t(self.task_keyprefix) - self.group_keyprefix = self.key_t(self.group_keyprefix) - self.chord_keyprefix = self.key_t(self.chord_keyprefix) - - def get(self, key): - raise NotImplementedError('Must implement the get method.') - - def mget(self, keys): - raise NotImplementedError('Does not support get_many') - - def set(self, key, value): - raise NotImplementedError('Must implement the set method.') - - def delete(self, key): - raise NotImplementedError('Must implement the delete method') - - def incr(self, key): - raise NotImplementedError('Does not implement incr') - - def expire(self, key, value): - pass - - def get_key_for_task(self, task_id, key=''): - """Get the cache key for a task by id.""" - key_t = self.key_t - return key_t('').join([ - self.task_keyprefix, key_t(task_id), key_t(key), - ]) - - def get_key_for_group(self, group_id, key=''): - """Get the cache key for a group by id.""" - key_t = self.key_t - return key_t('').join([ - self.group_keyprefix, key_t(group_id), key_t(key), - ]) - - def get_key_for_chord(self, group_id, key=''): - """Get the cache key for the chord waiting on group with given id.""" - key_t = self.key_t - return key_t('').join([ - self.chord_keyprefix, key_t(group_id), key_t(key), - ]) - - def _strip_prefix(self, key): - """Takes bytes, emits string.""" - key = self.key_t(key) - for prefix in self.task_keyprefix, self.group_keyprefix: - if key.startswith(prefix): - return bytes_to_str(key[len(prefix):]) - return bytes_to_str(key) - - def _filter_ready(self, values, READY_STATES=states.READY_STATES): - for k, v in values: - if v is not None: - v = self.decode_result(v) - if v['status'] in READY_STATES: - yield k, v - - def _mget_to_results(self, values, keys): - if hasattr(values, 'items'): - # client returns dict so mapping preserved. 
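# (Note, not in the original: both shapes occur in practice — e.g.
#  memcached's get_multi() returns a key->value mapping, while the Redis
#  backend's mget() returns a bare list in request order, which the
#  enumerate() branch below re-keys using the caller-supplied `keys`.)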
- return dict((self._strip_prefix(k), v) - for k, v in self._filter_ready(items(values))) - else: - # client returns list so need to recreate mapping. - return dict((bytes_to_str(keys[i]), v) - for i, v in self._filter_ready(enumerate(values))) - - def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True, - READY_STATES=states.READY_STATES): - interval = 0.5 if interval is None else interval - ids = task_ids if isinstance(task_ids, set) else set(task_ids) - cached_ids = set() - cache = self._cache - for task_id in ids: - try: - cached = cache[task_id] - except KeyError: - pass - else: - if cached['status'] in READY_STATES: - yield bytes_to_str(task_id), cached - cached_ids.add(task_id) - - ids.difference_update(cached_ids) - iterations = 0 - while ids: - keys = list(ids) - r = self._mget_to_results(self.mget([self.get_key_for_task(k) - for k in keys]), keys) - cache.update(r) - ids.difference_update(set(bytes_to_str(v) for v in r)) - for key, value in items(r): - yield bytes_to_str(key), value - if timeout and iterations * interval >= timeout: - raise TimeoutError('Operation timed out ({0})'.format(timeout)) - time.sleep(interval) # don't busy loop. - iterations += 1 - - def _forget(self, task_id): - self.delete(self.get_key_for_task(task_id)) - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - meta = {'status': status, 'result': result, 'traceback': traceback, - 'children': self.current_task_children(request)} - self.set(self.get_key_for_task(task_id), self.encode(meta)) - return result - - def _save_group(self, group_id, result): - self.set(self.get_key_for_group(group_id), - self.encode({'result': result.as_tuple()})) - return result - - def _delete_group(self, group_id): - self.delete(self.get_key_for_group(group_id)) - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - meta = self.get(self.get_key_for_task(task_id)) - if not meta: - return {'status': states.PENDING, 'result': None} - return self.decode_result(meta) - - def _restore_group(self, group_id): - """Get task metadata for a task by id.""" - meta = self.get(self.get_key_for_group(group_id)) - # previously this was always pickled, but later this - # was extended to support other serializers, so the - # structure is kind of weird. 
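# (Note, not in the original: after decode(), meta['result'] holds a
#  serialized GroupResult tuple — roughly the group id paired with the
#  children's own as_tuple() forms — and result_from_tuple() below
#  rebuilds live result objects from it.)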
- if meta: - meta = self.decode(meta) - result = meta['result'] - meta['result'] = result_from_tuple(result, self.app) - return meta - - def _apply_chord_incr(self, header, partial_args, group_id, body, - result=None, **options): - self.save_group(group_id, self.app.GroupResult(group_id, result)) - return header(*partial_args, task_id=group_id) - - def on_chord_part_return(self, task, state, result, propagate=None): - if not self.implements_incr: - return - app = self.app - if propagate is None: - propagate = app.conf.CELERY_CHORD_PROPAGATES - gid = task.request.group - if not gid: - return - key = self.get_key_for_chord(gid) - try: - deps = GroupResult.restore(gid, backend=task.backend) - except Exception as exc: - callback = maybe_signature(task.request.chord, app=app) - logger.error('Chord %r raised: %r', gid, exc, exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('Cannot restore group: {0!r}'.format(exc)), - ) - if deps is None: - try: - raise ValueError(gid) - except ValueError as exc: - callback = maybe_signature(task.request.chord, app=app) - logger.error('Chord callback %r raised: %r', gid, exc, - exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('GroupResult {0} no longer exists'.format(gid)), - ) - val = self.incr(key) - size = len(deps) - if val > size: - logger.warning('Chord counter incremented too many times for %r', - gid) - elif val == size: - callback = maybe_signature(task.request.chord, app=app) - j = deps.join_native if deps.supports_native_join else deps.join - try: - with allow_join_result(): - ret = j(timeout=3.0, propagate=propagate) - except Exception as exc: - try: - culprit = next(deps._failed_join_report()) - reason = 'Dependency {0.id} raised {1!r}'.format( - culprit, exc, - ) - except StopIteration: - reason = repr(exc) - - logger.error('Chord %r raised: %r', gid, reason, exc_info=1) - self.chord_error_from_stack(callback, ChordError(reason)) - else: - try: - callback.delay(ret) - except Exception as exc: - logger.error('Chord %r raised: %r', gid, exc, exc_info=1) - self.chord_error_from_stack( - callback, - ChordError('Callback error: {0!r}'.format(exc)), - ) - finally: - deps.delete() - self.client.delete(key) - else: - self.expire(key, 86400) - - -class DisabledBackend(BaseBackend): - _cache = {} # need this attribute to reset cache in tests. - - def store_result(self, *args, **kwargs): - pass - - def _is_disabled(self, *args, **kwargs): - raise NotImplementedError( - 'No result backend configured. ' - 'Please see the documentation for more information.') - - def as_uri(self, *args, **kwargs): - return 'disabled://' - - get_state = get_status = get_result = get_traceback = _is_disabled - wait_for = get_many = _is_disabled diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py deleted file mode 100644 index 3c8230c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.cache - ~~~~~~~~~~~~~~~~~~~~~ - - Memcache and in-memory cache result backend. 
- -""" -from __future__ import absolute_import - -import sys - -from kombu.utils import cached_property -from kombu.utils.encoding import bytes_to_str, ensure_bytes - -from celery.exceptions import ImproperlyConfigured -from celery.utils.functional import LRUCache - -from .base import KeyValueStoreBackend - -__all__ = ['CacheBackend'] - -_imp = [None] - -PY3 = sys.version_info[0] == 3 - -REQUIRES_BACKEND = """\ -The memcached backend requires either pylibmc or python-memcached.\ -""" - -UNKNOWN_BACKEND = """\ -The cache backend {0!r} is unknown, -Please use one of the following backends instead: {1}\ -""" - - -def import_best_memcache(): - if _imp[0] is None: - is_pylibmc, memcache_key_t = False, ensure_bytes - try: - import pylibmc as memcache - is_pylibmc = True - except ImportError: - try: - import memcache # noqa - except ImportError: - raise ImproperlyConfigured(REQUIRES_BACKEND) - if PY3: - memcache_key_t = bytes_to_str - _imp[0] = (is_pylibmc, memcache, memcache_key_t) - return _imp[0] - - -def get_best_memcache(*args, **kwargs): - is_pylibmc, memcache, key_t = import_best_memcache() - Client = _Client = memcache.Client - - if not is_pylibmc: - def Client(*args, **kwargs): # noqa - kwargs.pop('behaviors', None) - return _Client(*args, **kwargs) - - return Client, key_t - - -class DummyClient(object): - - def __init__(self, *args, **kwargs): - self.cache = LRUCache(limit=5000) - - def get(self, key, *args, **kwargs): - return self.cache.get(key) - - def get_multi(self, keys): - cache = self.cache - return dict((k, cache[k]) for k in keys if k in cache) - - def set(self, key, value, *args, **kwargs): - self.cache[key] = value - - def delete(self, key, *args, **kwargs): - self.cache.pop(key, None) - - def incr(self, key, delta=1): - return self.cache.incr(key, delta) - - -backends = {'memcache': get_best_memcache, - 'memcached': get_best_memcache, - 'pylibmc': get_best_memcache, - 'memory': lambda: (DummyClient, ensure_bytes)} - - -class CacheBackend(KeyValueStoreBackend): - servers = None - supports_autoexpire = True - supports_native_join = True - implements_incr = True - - def __init__(self, app, expires=None, backend=None, - options={}, url=None, **kwargs): - super(CacheBackend, self).__init__(app, **kwargs) - self.url = url - - self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS, - **options) - - self.backend = url or backend or self.app.conf.CELERY_CACHE_BACKEND - if self.backend: - self.backend, _, servers = self.backend.partition('://') - self.servers = servers.rstrip('/').split(';') - self.expires = self.prepare_expires(expires, type=int) - try: - self.Client, self.key_t = backends[self.backend]() - except KeyError: - raise ImproperlyConfigured(UNKNOWN_BACKEND.format( - self.backend, ', '.join(backends))) - self._encode_prefixes() # rencode the keyprefixes - - def get(self, key): - return self.client.get(key) - - def mget(self, keys): - return self.client.get_multi(keys) - - def set(self, key, value): - return self.client.set(key, value, self.expires) - - def delete(self, key): - return self.client.delete(key) - - def _apply_chord_incr(self, header, partial_args, group_id, body, **opts): - self.client.set(self.get_key_for_chord(group_id), 0, time=86400) - return super(CacheBackend, self)._apply_chord_incr( - header, partial_args, group_id, body, **opts - ) - - def incr(self, key): - return self.client.incr(key) - - @cached_property - def client(self): - return self.Client(self.servers, **self.options) - - def __reduce__(self, args=(), kwargs={}): - servers = 
';'.join(self.servers) - backend = '{0}://{1}/'.format(self.backend, servers) - kwargs.update( - dict(backend=backend, - expires=self.expires, - options=self.options)) - return super(CacheBackend, self).__reduce__(args, kwargs) - - def as_uri(self, *args, **kwargs): - """Return the backend as an URI. - - This properly handles the case of multiple servers. - - """ - servers = ';'.join(self.servers) - return '{0}://{1}/'.format(self.backend, servers) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py deleted file mode 100644 index 79f17ee..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py +++ /dev/null @@ -1,196 +0,0 @@ -# -* coding: utf-8 -*- -""" - celery.backends.cassandra - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Apache Cassandra result store backend. - -""" -from __future__ import absolute_import - -try: # pragma: no cover - import pycassa - from thrift import Thrift - C = pycassa.cassandra.ttypes -except ImportError: # pragma: no cover - pycassa = None # noqa - -import socket -import time - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.five import monotonic -from celery.utils.log import get_logger -from celery.utils.timeutils import maybe_timedelta, timedelta_seconds - -from .base import BaseBackend - -__all__ = ['CassandraBackend'] - -logger = get_logger(__name__) - - -class CassandraBackend(BaseBackend): - """Highly fault tolerant Cassandra backend. - - .. attribute:: servers - - List of Cassandra servers with format: ``hostname:port``. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`pycassa` is not available. - - """ - servers = [] - keyspace = None - column_family = None - detailed_mode = False - _retry_timeout = 300 - _retry_wait = 3 - supports_autoexpire = True - - def __init__(self, servers=None, keyspace=None, column_family=None, - cassandra_options=None, detailed_mode=False, **kwargs): - """Initialize Cassandra backend. - - Raises :class:`celery.exceptions.ImproperlyConfigured` if - the :setting:`CASSANDRA_SERVERS` setting is not set. - - """ - super(CassandraBackend, self).__init__(**kwargs) - - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if not pycassa: - raise ImproperlyConfigured( - 'You need to install the pycassa library to use the ' - 'Cassandra backend. 
See https://github.com/pycassa/pycassa') - - conf = self.app.conf - self.servers = (servers or - conf.get('CASSANDRA_SERVERS') or - self.servers) - self.keyspace = (keyspace or - conf.get('CASSANDRA_KEYSPACE') or - self.keyspace) - self.column_family = (column_family or - conf.get('CASSANDRA_COLUMN_FAMILY') or - self.column_family) - self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {}, - **cassandra_options or {}) - self.detailed_mode = (detailed_mode or - conf.get('CASSANDRA_DETAILED_MODE') or - self.detailed_mode) - read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM' - write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM' - try: - self.read_consistency = getattr(pycassa.ConsistencyLevel, - read_cons) - except AttributeError: - self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM - try: - self.write_consistency = getattr(pycassa.ConsistencyLevel, - write_cons) - except AttributeError: - self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM - - if not self.servers or not self.keyspace or not self.column_family: - raise ImproperlyConfigured( - 'Cassandra backend not configured.') - - self._column_family = None - - def _retry_on_error(self, fun, *args, **kwargs): - ts = monotonic() + self._retry_timeout - while 1: - try: - return fun(*args, **kwargs) - except (pycassa.InvalidRequestException, - pycassa.TimedOutException, - pycassa.UnavailableException, - pycassa.AllServersUnavailable, - socket.error, - socket.timeout, - Thrift.TException) as exc: - if monotonic() > ts: - raise - logger.warning('Cassandra error: %r. Retrying...', exc) - time.sleep(self._retry_wait) - - def _get_column_family(self): - if self._column_family is None: - conn = pycassa.ConnectionPool(self.keyspace, - server_list=self.servers, - **self.cassandra_options) - self._column_family = pycassa.ColumnFamily( - conn, self.column_family, - read_consistency_level=self.read_consistency, - write_consistency_level=self.write_consistency, - ) - return self._column_family - - def process_cleanup(self): - if self._column_family is not None: - self._column_family = None - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Store return value and status of an executed task.""" - - def _do_store(): - cf = self._get_column_family() - date_done = self.app.now() - meta = {'status': status, - 'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), - 'traceback': self.encode(traceback), - 'result': self.encode(result), - 'children': self.encode( - self.current_task_children(request), - )} - if self.detailed_mode: - cf.insert(task_id, {date_done: self.encode(meta)}, - ttl=self.expires and timedelta_seconds(self.expires)) - else: - cf.insert(task_id, meta, - ttl=self.expires and timedelta_seconds(self.expires)) - - return self._retry_on_error(_do_store) - - def as_uri(self, include_password=True): - return 'cassandra://' - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - - def _do_get(): - cf = self._get_column_family() - try: - if self.detailed_mode: - row = cf.get(task_id, column_reversed=True, column_count=1) - obj = self.decode(list(row.values())[0]) - else: - obj = cf.get(task_id) - - meta = { - 'task_id': task_id, - 'status': obj['status'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - 'traceback': self.decode(obj['traceback']), - 'children': self.decode(obj['children']), - } - except (KeyError, pycassa.NotFoundException): - meta = {'status': states.PENDING, 
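# (Note, not in the original: missing rows deliberately surface as
#  PENDING — callers cannot distinguish "never stored" from "not finished
#  yet", matching the convention of the other result backends.)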
'result': None} - return meta - - return self._retry_on_error(_do_get) - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - dict(servers=self.servers, - keyspace=self.keyspace, - column_family=self.column_family, - cassandra_options=self.cassandra_options)) - return super(CassandraBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py deleted file mode 100644 index cd7555e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.couchbase - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - CouchBase result store backend. - -""" -from __future__ import absolute_import - -import logging - -try: - from couchbase import Couchbase - from couchbase.connection import Connection - from couchbase.exceptions import NotFoundError -except ImportError: - Couchbase = Connection = NotFoundError = None # noqa - -from kombu.utils.url import _parse_url - -from celery.exceptions import ImproperlyConfigured -from celery.utils.timeutils import maybe_timedelta - -from .base import KeyValueStoreBackend - -__all__ = ['CouchBaseBackend'] - - -class CouchBaseBackend(KeyValueStoreBackend): - """CouchBase backend. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`couchbase` is not available. - - """ - bucket = 'default' - host = 'localhost' - port = 8091 - username = None - password = None - quiet = False - conncache = None - unlock_gil = True - timeout = 2.5 - transcoder = None - - def __init__(self, url=None, *args, **kwargs): - super(CouchBaseBackend, self).__init__(*args, **kwargs) - self.url = url - - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if Couchbase is None: - raise ImproperlyConfigured( - 'You need to install the couchbase library to use the ' - 'CouchBase backend.', - ) - - uhost = uport = uname = upass = ubucket = None - if url: - _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) - ubucket = ubucket.strip('/') if ubucket else None - - config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None) - if config is not None: - if not isinstance(config, dict): - raise ImproperlyConfigured( - 'Couchbase backend settings should be grouped in a dict', - ) - else: - config = {} - - self.host = uhost or config.get('host', self.host) - self.port = int(uport or config.get('port', self.port)) - self.bucket = ubucket or config.get('bucket', self.bucket) - self.username = uname or config.get('username', self.username) - self.password = upass or config.get('password', self.password) - - self._connection = None - - def _get_connection(self): - """Connect to the Couchbase server.""" - if self._connection is None: - kwargs = {'bucket': self.bucket, 'host': self.host} - - if self.port: - kwargs.update({'port': self.port}) - if self.username: - kwargs.update({'username': self.username}) - if self.password: - kwargs.update({'password': self.password}) - - logging.debug('couchbase settings %r', kwargs) - self._connection = Connection(**kwargs) - return self._connection - - @property - def connection(self): - return self._get_connection() - - def get(self, key): - try: - return self.connection.get(key).value - except NotFoundError: - return None - - def set(self, key, value): - self.connection.set(key, value) - - def mget(self, keys): - return [self.get(key) for key in keys] - - def delete(self, key): 
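# (Configuration sketch with hypothetical credentials and bucket name —
#  get/set/mget/delete above are the whole storage contract that
#  KeyValueStoreBackend requires, and the URL form is parsed in __init__
#  via kombu's _parse_url():
#
#      CELERY_RESULT_BACKEND = 'couchbase://user:pass@localhost:8091/tasks')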
- self.connection.delete(key) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py deleted file mode 100644 index f47fdd5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database - ~~~~~~~~~~~~~~~~~~~~~~~~ - - SQLAlchemy result store backend. - -""" -from __future__ import absolute_import - -import logging -from contextlib import contextmanager -from functools import wraps - -from celery import states -from celery.backends.base import BaseBackend -from celery.exceptions import ImproperlyConfigured -from celery.five import range -from celery.utils.timeutils import maybe_timedelta - -from .models import Task -from .models import TaskSet -from .session import SessionManager - -logger = logging.getLogger(__name__) - -__all__ = ['DatabaseBackend'] - - -def _sqlalchemy_installed(): - try: - import sqlalchemy - except ImportError: - raise ImproperlyConfigured( - 'The database result backend requires SQLAlchemy to be installed.' - 'See http://pypi.python.org/pypi/SQLAlchemy') - return sqlalchemy -_sqlalchemy_installed() - -from sqlalchemy.exc import DatabaseError, InvalidRequestError # noqa -from sqlalchemy.orm.exc import StaleDataError # noqa - - -@contextmanager -def session_cleanup(session): - try: - yield - except Exception: - session.rollback() - raise - finally: - session.close() - - -def retry(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - max_retries = kwargs.pop('max_retries', 3) - - for retries in range(max_retries): - try: - return fun(*args, **kwargs) - except (DatabaseError, InvalidRequestError, StaleDataError): - logger.warning( - "Failed operation %s. Retrying %s more times.", - fun.__name__, max_retries - retries - 1, - exc_info=True, - ) - if retries + 1 >= max_retries: - raise - - return _inner - - -class DatabaseBackend(BaseBackend): - """The database result backend.""" - # ResultSet.iterate should sleep this much between each pool, - # to not bombard the database with queries. - subpolling_interval = 0.5 - - def __init__(self, dburi=None, expires=None, - engine_options=None, url=None, **kwargs): - # The `url` argument was added later and is used by - # the app to set backend by url (celery.backends.get_backend_by_url) - super(DatabaseBackend, self).__init__(**kwargs) - conf = self.app.conf - self.expires = maybe_timedelta(self.prepare_expires(expires)) - self.url = url or dburi or conf.CELERY_RESULT_DBURI - self.engine_options = dict( - engine_options or {}, - **conf.CELERY_RESULT_ENGINE_OPTIONS or {}) - self.short_lived_sessions = kwargs.get( - 'short_lived_sessions', - conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS, - ) - - tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {} - Task.__table__.name = tablenames.get('task', 'celery_taskmeta') - TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta') - - if not self.url: - raise ImproperlyConfigured( - 'Missing connection string! 
Do you have ' - 'CELERY_RESULT_DBURI set to a real value?') - - def ResultSession(self, session_manager=SessionManager()): - return session_manager.session_factory( - dburi=self.url, - short_lived_sessions=self.short_lived_sessions, - **self.engine_options - ) - - @retry - def _store_result(self, task_id, result, status, - traceback=None, max_retries=3, **kwargs): - """Store return value and status of an executed task.""" - session = self.ResultSession() - with session_cleanup(session): - task = list(session.query(Task).filter(Task.task_id == task_id)) - task = task and task[0] - if not task: - task = Task(task_id) - session.add(task) - session.flush() - task.result = result - task.status = status - task.traceback = traceback - session.commit() - return result - - @retry - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - session = self.ResultSession() - with session_cleanup(session): - task = list(session.query(Task).filter(Task.task_id == task_id)) - task = task and task[0] - if not task: - task = Task(task_id) - task.status = states.PENDING - task.result = None - return self.meta_from_decoded(task.to_dict()) - - @retry - def _save_group(self, group_id, result): - """Store the result of an executed group.""" - session = self.ResultSession() - with session_cleanup(session): - group = TaskSet(group_id, result) - session.add(group) - session.flush() - session.commit() - return result - - @retry - def _restore_group(self, group_id): - """Get metadata for group by id.""" - session = self.ResultSession() - with session_cleanup(session): - group = session.query(TaskSet).filter( - TaskSet.taskset_id == group_id).first() - if group: - return group.to_dict() - - @retry - def _delete_group(self, group_id): - """Delete metadata for group by id.""" - session = self.ResultSession() - with session_cleanup(session): - session.query(TaskSet).filter( - TaskSet.taskset_id == group_id).delete() - session.flush() - session.commit() - - @retry - def _forget(self, task_id): - """Forget about result.""" - session = self.ResultSession() - with session_cleanup(session): - session.query(Task).filter(Task.task_id == task_id).delete() - session.commit() - - def cleanup(self): - """Delete expired metadata.""" - session = self.ResultSession() - expires = self.expires - now = self.app.now() - with session_cleanup(session): - session.query(Task).filter( - Task.date_done < (now - expires)).delete() - session.query(TaskSet).filter( - TaskSet.date_done < (now - expires)).delete() - session.commit() - - def __reduce__(self, args=(), kwargs={}): - kwargs.update( - dict(dburi=self.url, - expires=self.expires, - engine_options=self.engine_options)) - return super(DatabaseBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py deleted file mode 100644 index 2802a00..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database.models - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Database tables for the SQLAlchemy result store backend. 
-
-"""
-from __future__ import absolute_import
-
-from datetime import datetime
-
-import sqlalchemy as sa
-from sqlalchemy.types import PickleType
-
-from celery import states
-
-from .session import ResultModelBase
-
-__all__ = ['Task', 'TaskSet']
-
-
-class Task(ResultModelBase):
-    """Task result/status."""
-    __tablename__ = 'celery_taskmeta'
-    __table_args__ = {'sqlite_autoincrement': True}
-
-    id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
-                   primary_key=True,
-                   autoincrement=True)
-    task_id = sa.Column(sa.String(255), unique=True)
-    status = sa.Column(sa.String(50), default=states.PENDING)
-    result = sa.Column(PickleType, nullable=True)
-    date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
-                          onupdate=datetime.utcnow, nullable=True)
-    traceback = sa.Column(sa.Text, nullable=True)
-
-    def __init__(self, task_id):
-        self.task_id = task_id
-
-    def to_dict(self):
-        return {'task_id': self.task_id,
-                'status': self.status,
-                'result': self.result,
-                'traceback': self.traceback,
-                'date_done': self.date_done}
-
-    def __repr__(self):
-        return '<Task {0.task_id} state: {0.status}>'.format(self)
-
-
-class TaskSet(ResultModelBase):
-    """TaskSet result"""
-    __tablename__ = 'celery_tasksetmeta'
-    __table_args__ = {'sqlite_autoincrement': True}
-
-    id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
-                   autoincrement=True, primary_key=True)
-    taskset_id = sa.Column(sa.String(255), unique=True)
-    result = sa.Column(PickleType, nullable=True)
-    date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
-                          nullable=True)
-
-    def __init__(self, taskset_id, result):
-        self.taskset_id = taskset_id
-        self.result = result
-
-    def to_dict(self):
-        return {'taskset_id': self.taskset_id,
-                'result': self.result,
-                'date_done': self.date_done}
-
-    def __repr__(self):
-        return '<TaskSet: {0.taskset_id}>'.format(self)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py
deleted file mode 100644
index 1575d7f..0000000
--- a/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.backends.database.session
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    SQLAlchemy sessions.
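Taken together with the session manager defined in the file that follows, these models can be read back directly; a minimal sketch (the database URI and task id are illustrative only)::

    # Hedged sketch: load one stored result row via the deleted models.
    from celery.backends.database.models import Task
    from celery.backends.database.session import SessionManager

    session = SessionManager().session_factory('sqlite:///results.db')
    row = session.query(Task).filter(Task.task_id == 'some-task-id').first()
    if row is not None:
        print(row.to_dict())  # task_id, status, result, traceback, date_done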
- -""" -from __future__ import absolute_import - -from billiard.util import register_after_fork - -from sqlalchemy import create_engine -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker -from sqlalchemy.pool import NullPool - -ResultModelBase = declarative_base() - -__all__ = ['SessionManager'] - - -class SessionManager(object): - def __init__(self): - self._engines = {} - self._sessions = {} - self.forked = False - self.prepared = False - register_after_fork(self, self._after_fork) - - def _after_fork(self,): - self.forked = True - - def get_engine(self, dburi, **kwargs): - if self.forked: - try: - return self._engines[dburi] - except KeyError: - engine = self._engines[dburi] = create_engine(dburi, **kwargs) - return engine - else: - kwargs['poolclass'] = NullPool - return create_engine(dburi, **kwargs) - - def create_session(self, dburi, short_lived_sessions=False, **kwargs): - engine = self.get_engine(dburi, **kwargs) - if self.forked: - if short_lived_sessions or dburi not in self._sessions: - self._sessions[dburi] = sessionmaker(bind=engine) - return engine, self._sessions[dburi] - else: - return engine, sessionmaker(bind=engine) - - def prepare_models(self, engine): - if not self.prepared: - ResultModelBase.metadata.create_all(engine) - self.prepared = True - - def session_factory(self, dburi, **kwargs): - engine, session = self.create_session(dburi, **kwargs) - self.prepare_models(engine) - return session() diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py deleted file mode 100644 index 281c38c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.mongodb - ~~~~~~~~~~~~~~~~~~~~~~~ - - MongoDB result store backend. - -""" -from __future__ import absolute_import - -from datetime import datetime - -from kombu.syn import detect_environment -from kombu.utils import cached_property -from kombu.utils.url import maybe_sanitize_url - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.five import items, string_t -from celery.utils.timeutils import maybe_timedelta - -from .base import BaseBackend - -try: - import pymongo -except ImportError: # pragma: no cover - pymongo = None # noqa - -if pymongo: - try: - from bson.binary import Binary - except ImportError: # pragma: no cover - from pymongo.binary import Binary # noqa -else: # pragma: no cover - Binary = None # noqa - -__all__ = ['MongoBackend'] - - -class MongoBackend(BaseBackend): - """MongoDB result backend. - - :raises celery.exceptions.ImproperlyConfigured: if - module :mod:`pymongo` is not available. 
- - """ - - host = 'localhost' - port = 27017 - user = None - password = None - database_name = 'celery' - taskmeta_collection = 'celery_taskmeta' - max_pool_size = 10 - options = None - - supports_autoexpire = False - - _connection = None - - def __init__(self, app=None, url=None, **kwargs): - self.options = {} - super(MongoBackend, self).__init__(app, **kwargs) - self.expires = kwargs.get('expires') or maybe_timedelta( - self.app.conf.CELERY_TASK_RESULT_EXPIRES) - - if not pymongo: - raise ImproperlyConfigured( - 'You need to install the pymongo library to use the ' - 'MongoDB backend.') - - config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS') - if config is not None: - if not isinstance(config, dict): - raise ImproperlyConfigured( - 'MongoDB backend settings should be grouped in a dict') - config = dict(config) # do not modify original - - self.host = config.pop('host', self.host) - self.port = int(config.pop('port', self.port)) - self.user = config.pop('user', self.user) - self.password = config.pop('password', self.password) - self.database_name = config.pop('database', self.database_name) - self.taskmeta_collection = config.pop( - 'taskmeta_collection', self.taskmeta_collection, - ) - - self.options = dict(config, **config.pop('options', None) or {}) - - # Set option defaults - for key, value in items(self._prepare_client_options()): - self.options.setdefault(key, value) - - self.url = url - if self.url: - # Specifying backend as an URL - self.host = self.url - - def _prepare_client_options(self): - if pymongo.version_tuple >= (3, ): - return {'maxPoolSize': self.max_pool_size} - else: # pragma: no cover - options = { - 'max_pool_size': self.max_pool_size, - 'auto_start_request': False - } - if detect_environment() != 'default': - options['use_greenlets'] = True - return options - - def _get_connection(self): - """Connect to the MongoDB server.""" - if self._connection is None: - from pymongo import MongoClient - - # The first pymongo.Connection() argument (host) can be - # a list of ['host:port'] elements or a mongodb connection - # URI. If this is the case, don't use self.port - # but let pymongo get the port(s) from the URI instead. - # This enables the use of replica sets and sharding. - # See pymongo.Connection() for more info. 
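Restated as a standalone helper, the normalisation that the following lines perform looks roughly like this (host values are illustrative)::

    # Hedged sketch mirroring the URL handling described in the comment above.
    def as_mongodb_url(host, port=27017):
        url = host
        if isinstance(url, str) and not url.startswith('mongodb://'):
            url = 'mongodb://{0}:{1}'.format(url, port)
        if url == 'mongodb://':
            url = url + 'localhost'
        return url

    assert as_mongodb_url('db.example.com') == 'mongodb://db.example.com:27017'
    assert as_mongodb_url('mongodb://r1,r2/db') == 'mongodb://r1,r2/db'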
- url = self.host - if isinstance(url, string_t) \ - and not url.startswith('mongodb://'): - url = 'mongodb://{0}:{1}'.format(url, self.port) - if url == 'mongodb://': - url = url + 'localhost' - self._connection = MongoClient(host=url, **self.options) - - return self._connection - - def process_cleanup(self): - if self._connection is not None: - # MongoDB connection will be closed automatically when object - # goes out of scope - del(self.collection) - del(self.database) - self._connection = None - - def _store_result(self, task_id, result, status, - traceback=None, request=None, **kwargs): - """Store return value and status of an executed task.""" - meta = {'_id': task_id, - 'status': status, - 'result': Binary(self.encode(result)), - 'date_done': datetime.utcnow(), - 'traceback': Binary(self.encode(traceback)), - 'children': Binary(self.encode( - self.current_task_children(request), - ))} - self.collection.save(meta) - - return result - - def _get_task_meta_for(self, task_id): - """Get task metadata for a task by id.""" - - obj = self.collection.find_one({'_id': task_id}) - if not obj: - return {'status': states.PENDING, 'result': None} - - meta = { - 'task_id': obj['_id'], - 'status': obj['status'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - 'traceback': self.decode(obj['traceback']), - 'children': self.decode(obj['children']), - } - - return meta - - def _save_group(self, group_id, result): - """Save the group result.""" - meta = {'_id': group_id, - 'result': Binary(self.encode(result)), - 'date_done': datetime.utcnow()} - self.collection.save(meta) - - return result - - def _restore_group(self, group_id): - """Get the result for a group by id.""" - obj = self.collection.find_one({'_id': group_id}) - if not obj: - return - - meta = { - 'task_id': obj['_id'], - 'result': self.decode(obj['result']), - 'date_done': obj['date_done'], - } - - return meta - - def _delete_group(self, group_id): - """Delete a group by id.""" - self.collection.remove({'_id': group_id}) - - def _forget(self, task_id): - """Remove result from MongoDB. - - :raises celery.exceptions.OperationsError: - if the task_id could not be removed. - - """ - # By using safe=True, this will wait until it receives a response from - # the server. Likewise, it will raise an OperationsError if the - # response was unable to be completed. - self.collection.remove({'_id': task_id}) - - def cleanup(self): - """Delete expired metadata.""" - self.collection.remove( - {'date_done': {'$lt': self.app.now() - self.expires}}, - ) - - def __reduce__(self, args=(), kwargs={}): - return super(MongoBackend, self).__reduce__( - args, dict(kwargs, expires=self.expires, url=self.url), - ) - - def _get_database(self): - conn = self._get_connection() - db = conn[self.database_name] - if self.user and self.password: - if not db.authenticate(self.user, - self.password): - raise ImproperlyConfigured( - 'Invalid MongoDB username or password.') - return db - - @cached_property - def database(self): - """Get database from MongoDB connection and perform authentication - if necessary.""" - return self._get_database() - - @cached_property - def collection(self): - """Get the metadata task collection.""" - collection = self.database[self.taskmeta_collection] - - # Ensure an index on date_done is there, if not process the index - # in the background. 
Once completed cleanup will be much faster - collection.ensure_index('date_done', background='true') - return collection - - def as_uri(self, include_password=False): - """Return the backend as an URI. - - :keyword include_password: Censor passwords. - - """ - if not self.url: - return 'mongodb://' - if include_password: - return self.url - - if ',' not in self.url: - return maybe_sanitize_url(self.url) - - uri1, remainder = self.url.split(',', 1) - return ','.join([maybe_sanitize_url(uri1), remainder]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py b/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py deleted file mode 100644 index 1e838c1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.redis - ~~~~~~~~~~~~~~~~~~~~~ - - Redis result store backend. - -""" -from __future__ import absolute_import - -from functools import partial - -from kombu.utils import cached_property, retry_over_time -from kombu.utils.url import _parse_url - -from celery import states -from celery.canvas import maybe_signature -from celery.exceptions import ChordError, ImproperlyConfigured -from celery.five import string_t -from celery.utils import deprecated_property, strtobool -from celery.utils.functional import dictfilter -from celery.utils.log import get_logger -from celery.utils.timeutils import humanize_seconds - -from .base import KeyValueStoreBackend - -try: - import redis - from redis.exceptions import ConnectionError - from kombu.transport.redis import get_redis_error_classes -except ImportError: # pragma: no cover - redis = None # noqa - ConnectionError = None # noqa - get_redis_error_classes = None # noqa - -__all__ = ['RedisBackend'] - -REDIS_MISSING = """\ -You need to install the redis library in order to use \ -the Redis result store backend.""" - -logger = get_logger(__name__) -error = logger.error - - -class RedisBackend(KeyValueStoreBackend): - """Redis task result store.""" - - #: redis-py client module. - redis = redis - - #: Maximium number of connections in the pool. - max_connections = None - - supports_autoexpire = True - supports_native_join = True - implements_incr = True - - def __init__(self, host=None, port=None, db=None, password=None, - expires=None, max_connections=None, url=None, - connection_pool=None, new_join=False, **kwargs): - super(RedisBackend, self).__init__(**kwargs) - conf = self.app.conf - if self.redis is None: - raise ImproperlyConfigured(REDIS_MISSING) - self._client_capabilities = self._detect_client_capabilities() - - # For compatibility with the old REDIS_* configuration keys. 
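The compatibility lookup implemented by the nested helper right below can be pictured in isolation like this (configuration values are illustrative)::

    # Hedged sketch: new-style CELERY_REDIS_* keys shadow old REDIS_* keys.
    conf = {'REDIS_HOST': 'old.example.com',
            'CELERY_REDIS_HOST': 'new.example.com'}

    def lookup(conf, key):
        for prefix in ('CELERY_REDIS_{0}', 'REDIS_{0}'):
            if prefix.format(key) in conf:
                return conf[prefix.format(key)]

    assert lookup(conf, 'HOST') == 'new.example.com'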
- def _get(key): - for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}': - try: - return conf[prefix.format(key)] - except KeyError: - pass - if host and '://' in host: - url = host - host = None - - self.max_connections = ( - max_connections or _get('MAX_CONNECTIONS') or self.max_connections - ) - self._ConnectionPool = connection_pool - - self.connparams = { - 'host': _get('HOST') or 'localhost', - 'port': _get('PORT') or 6379, - 'db': _get('DB') or 0, - 'password': _get('PASSWORD'), - 'max_connections': self.max_connections, - } - if url: - self.connparams = self._params_from_url(url, self.connparams) - self.url = url - self.expires = self.prepare_expires(expires, type=int) - - try: - new_join = strtobool(self.connparams.pop('new_join')) - except KeyError: - pass - if new_join: - self.apply_chord = self._new_chord_apply - self.on_chord_part_return = self._new_chord_return - - self.connection_errors, self.channel_errors = ( - get_redis_error_classes() if get_redis_error_classes - else ((), ())) - - def _params_from_url(self, url, defaults): - scheme, host, port, user, password, path, query = _parse_url(url) - connparams = dict( - defaults, **dictfilter({ - 'host': host, 'port': port, 'password': password, - 'db': query.pop('virtual_host', None)}) - ) - - if scheme == 'socket': - # use 'path' as path to the socket… in this case - # the database number should be given in 'query' - connparams.update({ - 'connection_class': self.redis.UnixDomainSocketConnection, - 'path': '/' + path, - }) - # host+port are invalid options when using this connection type. - connparams.pop('host', None) - connparams.pop('port', None) - else: - connparams['db'] = path - - # db may be string and start with / like in kombu. - db = connparams.get('db') or 0 - db = db.strip('/') if isinstance(db, string_t) else db - connparams['db'] = int(db) - - # Query parameters override other parameters - connparams.update(query) - return connparams - - def get(self, key): - return self.client.get(key) - - def mget(self, keys): - return self.client.mget(keys) - - def ensure(self, fun, args, **policy): - retry_policy = dict(self.retry_policy, **policy) - max_retries = retry_policy.get('max_retries') - return retry_over_time( - fun, self.connection_errors, args, {}, - partial(self.on_connection_error, max_retries), - **retry_policy - ) - - def on_connection_error(self, max_retries, exc, intervals, retries): - tts = next(intervals) - error('Connection to Redis lost: Retry (%s/%s) %s.', - retries, max_retries or 'Inf', - humanize_seconds(tts, 'in ')) - return tts - - def set(self, key, value, **retry_policy): - return self.ensure(self._set, (key, value), **retry_policy) - - def _set(self, key, value): - with self.client.pipeline() as pipe: - if self.expires: - pipe.setex(key, value, self.expires) - else: - pipe.set(key, value) - pipe.publish(key, value) - pipe.execute() - - def delete(self, key): - self.client.delete(key) - - def incr(self, key): - return self.client.incr(key) - - def expire(self, key, value): - return self.client.expire(key, value) - - def _unpack_chord_result(self, tup, decode, - EXCEPTION_STATES=states.EXCEPTION_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES): - _, tid, state, retval = decode(tup) - if state in EXCEPTION_STATES: - retval = self.exception_to_python(retval) - if state in PROPAGATE_STATES: - raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval)) - return retval - - def _new_chord_apply(self, header, partial_args, group_id, body, - result=None, **options): - # avoids saving the group in 
the redis db. - return header(*partial_args, task_id=group_id) - - def _new_chord_return(self, task, state, result, propagate=None, - PROPAGATE_STATES=states.PROPAGATE_STATES): - app = self.app - if propagate is None: - propagate = self.app.conf.CELERY_CHORD_PROPAGATES - request = task.request - tid, gid = request.id, request.group - if not gid or not tid: - return - - client = self.client - jkey = self.get_key_for_group(gid, '.j') - result = self.encode_result(result, state) - with client.pipeline() as pipe: - _, readycount, _ = pipe \ - .rpush(jkey, self.encode([1, tid, state, result])) \ - .llen(jkey) \ - .expire(jkey, 86400) \ - .execute() - - try: - callback = maybe_signature(request.chord, app=app) - total = callback['chord_size'] - if readycount == total: - decode, unpack = self.decode, self._unpack_chord_result - with client.pipeline() as pipe: - resl, _, = pipe \ - .lrange(jkey, 0, total) \ - .delete(jkey) \ - .execute() - try: - callback.delay([unpack(tup, decode) for tup in resl]) - except Exception as exc: - error('Chord callback for %r raised: %r', - request.group, exc, exc_info=1) - return self.chord_error_from_stack( - callback, - ChordError('Callback error: {0!r}'.format(exc)), - ) - except ChordError as exc: - error('Chord %r raised: %r', request.group, exc, exc_info=1) - return self.chord_error_from_stack(callback, exc) - except Exception as exc: - error('Chord %r raised: %r', request.group, exc, exc_info=1) - return self.chord_error_from_stack( - callback, ChordError('Join error: {0!r}'.format(exc)), - ) - - def _detect_client_capabilities(self, socket_connect_timeout=False): - if self.redis.VERSION < (2, 4, 4): - raise ImproperlyConfigured( - 'Redis backend requires redis-py versions 2.4.4 or later. ' - 'You have {0.__version__}'.format(redis)) - if self.redis.VERSION >= (2, 10): - socket_connect_timeout = True - return {'socket_connect_timeout': socket_connect_timeout} - - def _create_client(self, socket_timeout=None, socket_connect_timeout=None, - **params): - return self._new_redis_client( - socket_timeout=socket_timeout and float(socket_timeout), - socket_connect_timeout=socket_connect_timeout and float( - socket_connect_timeout), **params - ) - - def _new_redis_client(self, **params): - if not self._client_capabilities['socket_connect_timeout']: - params.pop('socket_connect_timeout', None) - return self.redis.Redis(connection_pool=self.ConnectionPool(**params)) - - @property - def ConnectionPool(self): - if self._ConnectionPool is None: - self._ConnectionPool = self.redis.ConnectionPool - return self._ConnectionPool - - @cached_property - def client(self): - return self._create_client(**self.connparams) - - def __reduce__(self, args=(), kwargs={}): - return super(RedisBackend, self).__reduce__( - (self.url, ), {'expires': self.expires}, - ) - - @deprecated_property(3.2, 3.3) - def host(self): - return self.connparams['host'] - - @deprecated_property(3.2, 3.3) - def port(self): - return self.connparams['port'] - - @deprecated_property(3.2, 3.3) - def db(self): - return self.connparams['db'] - - @deprecated_property(3.2, 3.3) - def password(self): - return self.connparams['password'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py b/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py deleted file mode 100644 index 92bcc61..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.rpc - ~~~~~~~~~~~~~~~~~~~ - - RPC-style result backend, 
using reply-to and one queue per client. - -""" -from __future__ import absolute_import - -from kombu import Consumer, Exchange -from kombu.common import maybe_declare -from kombu.utils import cached_property - -from celery import current_task -from celery.backends import amqp - -__all__ = ['RPCBackend'] - - -class RPCBackend(amqp.AMQPBackend): - persistent = False - - class Consumer(Consumer): - auto_declare = False - - def _create_exchange(self, name, type='direct', delivery_mode=2): - # uses direct to queue routing (anon exchange). - return Exchange(None) - - def on_task_call(self, producer, task_id): - maybe_declare(self.binding(producer.channel), retry=True) - - def _create_binding(self, task_id): - return self.binding - - def _many_bindings(self, ids): - return [self.binding] - - def rkey(self, task_id): - return task_id - - def destination_for(self, task_id, request): - # Request is a new argument for backends, so must still support - # old code that rely on current_task - try: - request = request or current_task.request - except AttributeError: - raise RuntimeError( - 'RPC backend missing task request for {0!r}'.format(task_id), - ) - return request.reply_to, request.correlation_id or task_id - - def on_reply_declare(self, task_id): - pass - - def as_uri(self, include_password=True): - return 'rpc://' - - @property - def binding(self): - return self.Queue(self.oid, self.exchange, self.oid, - durable=False, auto_delete=False) - - @cached_property - def oid(self): - return self.app.oid diff --git a/thesisenv/lib/python3.6/site-packages/celery/beat.py b/thesisenv/lib/python3.6/site-packages/celery/beat.py deleted file mode 100644 index 368a903..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/beat.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.beat - ~~~~~~~~~~~ - - The periodic task scheduler. - -""" -from __future__ import absolute_import - -import errno -import os -import time -import shelve -import sys -import traceback - -from threading import Event, Thread - -from billiard import ensure_multiprocessing -from billiard.process import Process -from billiard.common import reset_signals -from kombu.utils import cached_property, reprcall -from kombu.utils.functional import maybe_evaluate - -from . import __version__ -from . import platforms -from . import signals -from .five import items, reraise, values, monotonic -from .schedules import maybe_schedule, crontab -from .utils.imports import instantiate -from .utils.timeutils import humanize_seconds -from .utils.log import get_logger, iter_open_logger_fds - -__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler', - 'PersistentScheduler', 'Service', 'EmbeddedService'] - -logger = get_logger(__name__) -debug, info, error, warning = (logger.debug, logger.info, - logger.error, logger.warning) - -DEFAULT_MAX_INTERVAL = 300 # 5 minutes - - -class SchedulingError(Exception): - """An error occured while scheduling a task.""" - - -class ScheduleEntry(object): - """An entry in the scheduler. - - :keyword name: see :attr:`name`. - :keyword schedule: see :attr:`schedule`. - :keyword args: see :attr:`args`. - :keyword kwargs: see :attr:`kwargs`. - :keyword options: see :attr:`options`. - :keyword last_run_at: see :attr:`last_run_at`. - :keyword total_run_count: see :attr:`total_run_count`. - :keyword relative: Is the time relative to when the server starts? - - """ - - #: The task name - name = None - - #: The schedule (run_every/crontab) - schedule = None - - #: Positional arguments to apply. 
-    args = None
-
-    #: Keyword arguments to apply.
-    kwargs = None
-
-    #: Task execution options.
-    options = None
-
-    #: The time and date of when this task was last scheduled.
-    last_run_at = None
-
-    #: Total number of times this task has been scheduled.
-    total_run_count = 0
-
-    def __init__(self, name=None, task=None, last_run_at=None,
-                 total_run_count=None, schedule=None, args=(), kwargs={},
-                 options={}, relative=False, app=None):
-        self.app = app
-        self.name = name
-        self.task = task
-        self.args = args
-        self.kwargs = kwargs
-        self.options = options
-        self.schedule = maybe_schedule(schedule, relative, app=self.app)
-        self.last_run_at = last_run_at or self._default_now()
-        self.total_run_count = total_run_count or 0
-
-    def _default_now(self):
-        return self.schedule.now() if self.schedule else self.app.now()
-
-    def _next_instance(self, last_run_at=None):
-        """Return a new instance of the same class, but with
-        its date and count fields updated."""
-        return self.__class__(**dict(
-            self,
-            last_run_at=last_run_at or self._default_now(),
-            total_run_count=self.total_run_count + 1,
-        ))
-    __next__ = next = _next_instance  # for 2to3
-
-    def __reduce__(self):
-        return self.__class__, (
-            self.name, self.task, self.last_run_at, self.total_run_count,
-            self.schedule, self.args, self.kwargs, self.options,
-        )
-
-    def update(self, other):
-        """Update values from another entry.
-
-        Does only update "editable" fields (task, schedule, args, kwargs,
-        options).
-
-        """
-        self.__dict__.update({'task': other.task, 'schedule': other.schedule,
-                              'args': other.args, 'kwargs': other.kwargs,
-                              'options': other.options})
-
-    def is_due(self):
-        """See :meth:`~celery.schedule.schedule.is_due`."""
-        return self.schedule.is_due(self.last_run_at)
-
-    def __iter__(self):
-        return iter(items(vars(self)))
-
-    def __repr__(self):
-        return '<Entry: {0.name} {call} {0.schedule}'.format(
-            self,
-            call=reprcall(self.task, self.args or (), self.kwargs or {}),
-        )
-
-
-class Scheduler(object):
-    """Scheduler for periodic tasks."""
-    Entry = ScheduleEntry
-
-    #: The schedule dict/shelve.
-    schedule = None
-
-    #: Maximum time to sleep between re-checking the schedule.
-    max_interval = DEFAULT_MAX_INTERVAL
-
-    #: How often to sync the schedule (3 minutes by default)
-    sync_every = 3 * 60
-
-    #: How many tasks can be called before a sync is forced.
-    sync_every_tasks = None
-
-    _last_sync = None
-    _tasks_since_sync = 0
-
-    logger = logger  # compat
-
-    def __init__(self, app, schedule=None, max_interval=None,
-                 Publisher=None, lazy=False, sync_every_tasks=None, **kwargs):
-        self.app = app
-        self.data = maybe_evaluate({} if schedule is None else schedule)
-        self.max_interval = (max_interval or
-                             app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or
-                             self.max_interval)
-        self.Publisher = Publisher or app.amqp.TaskProducer
-        self.sync_every_tasks = (
-            app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
-            else sync_every_tasks)
-        if not lazy:
-            self.setup_schedule()
-
-    def install_default_entries(self, data):
-        entries = {}
-        if self.app.conf.CELERY_TASK_RESULT_EXPIRES and \
-                not self.app.backend.supports_autoexpire:
-            if 'celery.backend_cleanup' not in data:
-                entries['celery.backend_cleanup'] = {
-                    'task': 'celery.backend_cleanup',
-                    'schedule': crontab('0', '4', '*'),
-                    'options': {'expires': 12 * 3600}}
-        self.update_from_dict(entries)
-
-    def maybe_due(self, entry, publisher=None):
-        is_due, next_time_to_run = entry.is_due()
-
-        if is_due:
-            info('Scheduler: Sending due task %s (%s)',
-                 entry.name, entry.task)
-            try:
-                result = self.apply_async(entry, publisher=publisher)
-            except Exception as exc:
-                error('Message Error: %s\n%s',
-                      exc, traceback.format_stack(), exc_info=True)
-            else:
-                debug('%s sent. id->%s', entry.task, result.id)
-        return next_time_to_run
-
-    def tick(self):
-        """Run a tick, that is one iteration of the scheduler.
-
-        Executes all due tasks.
-
-        """
-        remaining_times = []
-        try:
-            for entry in values(self.schedule):
-                next_time_to_run = self.maybe_due(entry, self.publisher)
-                if next_time_to_run:
-                    remaining_times.append(next_time_to_run)
-        except RuntimeError:
-            pass
-
-        return min(remaining_times + [self.max_interval])
-
-    def should_sync(self):
-        return (
-            (not self._last_sync or
-               (monotonic() - self._last_sync) > self.sync_every) or
-            (self.sync_every_tasks and
-                self._tasks_since_sync >= self.sync_every_tasks)
-        )
-
-    def reserve(self, entry):
-        new_entry = self.schedule[entry.name] = next(entry)
-        return new_entry
-
-    def apply_async(self, entry, publisher=None, **kwargs):
-        # Update timestamps and run counts before we actually execute,
-        # so we have that done if an exception is raised (doesn't schedule
-        # forever.)
- entry = self.reserve(entry) - task = self.app.tasks.get(entry.task) - - try: - if task: - result = task.apply_async(entry.args, entry.kwargs, - publisher=publisher, - **entry.options) - else: - result = self.send_task(entry.task, entry.args, entry.kwargs, - publisher=publisher, - **entry.options) - except Exception as exc: - reraise(SchedulingError, SchedulingError( - "Couldn't apply scheduled task {0.name}: {exc}".format( - entry, exc=exc)), sys.exc_info()[2]) - finally: - self._tasks_since_sync += 1 - if self.should_sync(): - self._do_sync() - return result - - def send_task(self, *args, **kwargs): - return self.app.send_task(*args, **kwargs) - - def setup_schedule(self): - self.install_default_entries(self.data) - - def _do_sync(self): - try: - debug('beat: Synchronizing schedule...') - self.sync() - finally: - self._last_sync = monotonic() - self._tasks_since_sync = 0 - - def sync(self): - pass - - def close(self): - self.sync() - - def add(self, **kwargs): - entry = self.Entry(app=self.app, **kwargs) - self.schedule[entry.name] = entry - return entry - - def _maybe_entry(self, name, entry): - if isinstance(entry, self.Entry): - entry.app = self.app - return entry - return self.Entry(**dict(entry, name=name, app=self.app)) - - def update_from_dict(self, dict_): - self.schedule.update(dict( - (name, self._maybe_entry(name, entry)) - for name, entry in items(dict_))) - - def merge_inplace(self, b): - schedule = self.schedule - A, B = set(schedule), set(b) - - # Remove items from disk not in the schedule anymore. - for key in A ^ B: - schedule.pop(key, None) - - # Update and add new items in the schedule - for key in B: - entry = self.Entry(**dict(b[key], name=key, app=self.app)) - if schedule.get(key): - schedule[key].update(entry) - else: - schedule[key] = entry - - def _ensure_connected(self): - # callback called for each retry while the connection - # can't be established. - def _error_handler(exc, interval): - error('beat: Connection error: %s. ' - 'Trying again in %s seconds...', exc, interval) - - return self.connection.ensure_connection( - _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES - ) - - def get_schedule(self): - return self.data - - def set_schedule(self, schedule): - self.data = schedule - schedule = property(get_schedule, set_schedule) - - @cached_property - def connection(self): - return self.app.connection() - - @cached_property - def publisher(self): - return self.Publisher(self._ensure_connected()) - - @property - def info(self): - return '' - - -class PersistentScheduler(Scheduler): - persistence = shelve - known_suffixes = ('', '.db', '.dat', '.bak', '.dir') - - _store = None - - def __init__(self, *args, **kwargs): - self.schedule_filename = kwargs.get('schedule_filename') - Scheduler.__init__(self, *args, **kwargs) - - def _remove_db(self): - for suffix in self.known_suffixes: - with platforms.ignore_errno(errno.ENOENT): - os.remove(self.schedule_filename + suffix) - - def _open_schedule(self): - return self.persistence.open(self.schedule_filename, writeback=True) - - def _destroy_open_corrupted_schedule(self, exc): - error('Removing corrupted schedule file %r: %r', - self.schedule_filename, exc, exc_info=True) - self._remove_db() - return self._open_schedule() - - def setup_schedule(self): - try: - self._store = self._open_schedule() - # In some cases there may be different errors from a storage - # backend for corrupted files. Example - DBPageNotFoundError - # exception from bsddb. 
In such case the file will be - # successfully opened but the error will be raised on first key - # retrieving. - self._store.keys() - except Exception as exc: - self._store = self._destroy_open_corrupted_schedule(exc) - - for _ in (1, 2): - try: - self._store['entries'] - except KeyError: - # new schedule db - try: - self._store['entries'] = {} - except KeyError as exc: - self._store = self._destroy_open_corrupted_schedule(exc) - continue - else: - if '__version__' not in self._store: - warning('DB Reset: Account for new __version__ field') - self._store.clear() # remove schedule at 2.2.2 upgrade. - elif 'tz' not in self._store: - warning('DB Reset: Account for new tz field') - self._store.clear() # remove schedule at 3.0.8 upgrade - elif 'utc_enabled' not in self._store: - warning('DB Reset: Account for new utc_enabled field') - self._store.clear() # remove schedule at 3.0.9 upgrade - break - - tz = self.app.conf.CELERY_TIMEZONE - stored_tz = self._store.get('tz') - if stored_tz is not None and stored_tz != tz: - warning('Reset: Timezone changed from %r to %r', stored_tz, tz) - self._store.clear() # Timezone changed, reset db! - utc = self.app.conf.CELERY_ENABLE_UTC - stored_utc = self._store.get('utc_enabled') - if stored_utc is not None and stored_utc != utc: - choices = {True: 'enabled', False: 'disabled'} - warning('Reset: UTC changed from %s to %s', - choices[stored_utc], choices[utc]) - self._store.clear() # UTC setting changed, reset db! - entries = self._store.setdefault('entries', {}) - self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE) - self.install_default_entries(self.schedule) - self._store.update(__version__=__version__, tz=tz, utc_enabled=utc) - self.sync() - debug('Current schedule:\n' + '\n'.join( - repr(entry) for entry in values(entries))) - - def get_schedule(self): - return self._store['entries'] - - def set_schedule(self, schedule): - self._store['entries'] = schedule - schedule = property(get_schedule, set_schedule) - - def sync(self): - if self._store is not None: - self._store.sync() - - def close(self): - self.sync() - self._store.close() - - @property - def info(self): - return ' . 
db -> {self.schedule_filename}'.format(self=self) - - -class Service(object): - scheduler_cls = PersistentScheduler - - def __init__(self, app, max_interval=None, schedule_filename=None, - scheduler_cls=None): - self.app = app - self.max_interval = (max_interval or - app.conf.CELERYBEAT_MAX_LOOP_INTERVAL) - self.scheduler_cls = scheduler_cls or self.scheduler_cls - self.schedule_filename = ( - schedule_filename or app.conf.CELERYBEAT_SCHEDULE_FILENAME) - - self._is_shutdown = Event() - self._is_stopped = Event() - - def __reduce__(self): - return self.__class__, (self.max_interval, self.schedule_filename, - self.scheduler_cls, self.app) - - def start(self, embedded_process=False, drift=-0.010): - info('beat: Starting...') - debug('beat: Ticking with max interval->%s', - humanize_seconds(self.scheduler.max_interval)) - - signals.beat_init.send(sender=self) - if embedded_process: - signals.beat_embedded_init.send(sender=self) - platforms.set_process_title('celery beat') - - try: - while not self._is_shutdown.is_set(): - interval = self.scheduler.tick() - interval = interval + drift if interval else interval - if interval and interval > 0: - debug('beat: Waking up %s.', - humanize_seconds(interval, prefix='in ')) - time.sleep(interval) - if self.scheduler.should_sync(): - self.scheduler._do_sync() - except (KeyboardInterrupt, SystemExit): - self._is_shutdown.set() - finally: - self.sync() - - def sync(self): - self.scheduler.close() - self._is_stopped.set() - - def stop(self, wait=False): - info('beat: Shutting down...') - self._is_shutdown.set() - wait and self._is_stopped.wait() # block until shutdown done. - - def get_scheduler(self, lazy=False): - filename = self.schedule_filename - scheduler = instantiate(self.scheduler_cls, - app=self.app, - schedule_filename=filename, - max_interval=self.max_interval, - lazy=lazy) - return scheduler - - @cached_property - def scheduler(self): - return self.get_scheduler() - - -class _Threaded(Thread): - """Embedded task scheduler using threading.""" - - def __init__(self, app, **kwargs): - super(_Threaded, self).__init__() - self.app = app - self.service = Service(app, **kwargs) - self.daemon = True - self.name = 'Beat' - - def run(self): - self.app.set_current() - self.service.start() - - def stop(self): - self.service.stop(wait=True) - - -try: - ensure_multiprocessing() -except NotImplementedError: # pragma: no cover - _Process = None -else: - class _Process(Process): # noqa - - def __init__(self, app, **kwargs): - super(_Process, self).__init__() - self.app = app - self.service = Service(app, **kwargs) - self.name = 'Beat' - - def run(self): - reset_signals(full=False) - platforms.close_open_fds([ - sys.__stdin__, sys.__stdout__, sys.__stderr__, - ] + list(iter_open_logger_fds())) - self.app.set_default() - self.app.set_current() - self.service.start(embedded_process=True) - - def stop(self): - self.service.stop() - self.terminate() - - -def EmbeddedService(app, max_interval=None, **kwargs): - """Return embedded clock service. - - :keyword thread: Run threaded instead of as a separate process. - Uses :mod:`multiprocessing` by default, if available. - - """ - if kwargs.pop('thread', False) or _Process is None: - # Need short max interval to be able to stop thread - # in reasonable time. 
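A hedged usage sketch for this factory (application name and broker URL are illustrative); the threaded branch is the return statement just below::

    # Run the beat scheduler embedded in the current process.
    from celery import Celery
    from celery.beat import EmbeddedService

    app = Celery('proj', broker='amqp://guest@localhost//')
    beat = EmbeddedService(app, thread=True)  # thread=True -> _Threaded
    beat.start()
    # ... do other work while beat ticks in the background ...
    beat.stop()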
- return _Threaded(app, max_interval=1, **kwargs) - return _Process(app, max_interval=max_interval, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py deleted file mode 100644 index 3f44b50..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from .base import Option - -__all__ = ['Option'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py deleted file mode 100644 index ce3b351..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -""" -The :program:`celery amqp` command. - -.. program:: celery amqp - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import cmd -import sys -import shlex -import pprint - -from functools import partial -from itertools import count - -from kombu.utils.encoding import safe_str - -from celery.utils.functional import padlist - -from celery.bin.base import Command -from celery.five import string_t -from celery.utils import strtobool - -__all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp'] - -# Map to coerce strings to other types. -COERCE = {bool: strtobool} - -HELP_HEADER = """ -Commands --------- -""".rstrip() - -EXAMPLE_TEXT = """ -Example: - -> queue.delete myqueue yes no -""" - -say = partial(print, file=sys.stderr) - - -class Spec(object): - """AMQP Command specification. - - Used to convert arguments to Python values and display various help - and tooltips. - - :param args: see :attr:`args`. - :keyword returns: see :attr:`returns`. - - .. attribute args:: - - List of arguments this command takes. Should - contain `(argument_name, argument_type)` tuples. - - .. attribute returns: - - Helpful human string representation of what this command returns. - May be :const:`None`, to signify the return type is unknown. - - """ - def __init__(self, *args, **kwargs): - self.args = args - self.returns = kwargs.get('returns') - - def coerce(self, index, value): - """Coerce value for argument at index.""" - arg_info = self.args[index] - arg_type = arg_info[1] - # Might be a custom way to coerce the string value, - # so look in the coercion map. - return COERCE.get(arg_type, arg_type)(value) - - def str_args_to_python(self, arglist): - """Process list of string arguments to values according to spec. - - e.g: - - >>> spec = Spec([('queue', str), ('if_unused', bool)]) - >>> spec.str_args_to_python('pobox', 'true') - ('pobox', True) - - """ - return tuple( - self.coerce(index, value) for index, value in enumerate(arglist)) - - def format_response(self, response): - """Format the return value of this command in a human-friendly way.""" - if not self.returns: - return 'ok.' if response is None else response - if callable(self.returns): - return self.returns(response) - return self.returns.format(response) - - def format_arg(self, name, type, default_value=None): - if default_value is not None: - return '{0}:{1}'.format(name, default_value) - return name - - def format_signature(self): - return ' '.join(self.format_arg(*padlist(list(arg), 3)) - for arg in self.args) - - -def dump_message(message): - if message is None: - return 'No messages in queue. basic.publish something.' 
- return {'body': message.body, - 'properties': message.properties, - 'delivery_info': message.delivery_info} - - -def format_declare_queue(ret): - return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret) - - -class AMQShell(cmd.Cmd): - """AMQP API Shell. - - :keyword connect: Function used to connect to the server, must return - connection object. - - :keyword silent: If :const:`True`, the commands won't have annoying - output not relevant when running in non-shell mode. - - - .. attribute: builtins - - Mapping of built-in command names -> method names - - .. attribute:: amqp - - Mapping of AMQP API commands and their :class:`Spec`. - - """ - conn = None - chan = None - prompt_fmt = '{self.counter}> ' - identchars = cmd.IDENTCHARS = '.' - needs_reconnect = False - counter = 1 - inc_counter = count(2) - - builtins = {'EOF': 'do_exit', - 'exit': 'do_exit', - 'help': 'do_help'} - - amqp = { - 'exchange.declare': Spec(('exchange', str), - ('type', str), - ('passive', bool, 'no'), - ('durable', bool, 'no'), - ('auto_delete', bool, 'no'), - ('internal', bool, 'no')), - 'exchange.delete': Spec(('exchange', str), - ('if_unused', bool)), - 'queue.bind': Spec(('queue', str), - ('exchange', str), - ('routing_key', str)), - 'queue.declare': Spec(('queue', str), - ('passive', bool, 'no'), - ('durable', bool, 'no'), - ('exclusive', bool, 'no'), - ('auto_delete', bool, 'no'), - returns=format_declare_queue), - 'queue.delete': Spec(('queue', str), - ('if_unused', bool, 'no'), - ('if_empty', bool, 'no'), - returns='ok. {0} messages deleted.'), - 'queue.purge': Spec(('queue', str), - returns='ok. {0} messages deleted.'), - 'basic.get': Spec(('queue', str), - ('no_ack', bool, 'off'), - returns=dump_message), - 'basic.publish': Spec(('msg', str), - ('exchange', str), - ('routing_key', str), - ('mandatory', bool, 'no'), - ('immediate', bool, 'no')), - 'basic.ack': Spec(('delivery_tag', int)), - } - - def _prepare_spec(self, conn): - # XXX Hack to fix Issue #2013 - from amqp import Connection, Message - if isinstance(conn.connection, Connection): - self.amqp['basic.publish'] = Spec(('msg', Message), - ('exchange', str), - ('routing_key', str), - ('mandatory', bool, 'no'), - ('immediate', bool, 'no')) - - def __init__(self, *args, **kwargs): - self.connect = kwargs.pop('connect') - self.silent = kwargs.pop('silent', False) - self.out = kwargs.pop('out', sys.stderr) - cmd.Cmd.__init__(self, *args, **kwargs) - self._reconnect() - - def note(self, m): - """Say something to the user. Disabled if :attr:`silent`.""" - if not self.silent: - say(m, file=self.out) - - def say(self, m): - say(m, file=self.out) - - def get_amqp_api_command(self, cmd, arglist): - """With a command name and a list of arguments, convert the arguments - to Python values and find the corresponding method on the AMQP channel - object. - - :returns: tuple of `(method, processed_args)`. 
- - """ - spec = self.amqp[cmd] - args = spec.str_args_to_python(arglist) - attr_name = cmd.replace('.', '_') - if self.needs_reconnect: - self._reconnect() - return getattr(self.chan, attr_name), args, spec.format_response - - def do_exit(self, *args): - """The `'exit'` command.""" - self.note("\n-> please, don't leave!") - sys.exit(0) - - def display_command_help(self, cmd, short=False): - spec = self.amqp[cmd] - self.say('{0} {1}'.format(cmd, spec.format_signature())) - - def do_help(self, *args): - if not args: - self.say(HELP_HEADER) - for cmd_name in self.amqp: - self.display_command_help(cmd_name, short=True) - self.say(EXAMPLE_TEXT) - else: - self.display_command_help(args[0]) - - def default(self, line): - self.say("unknown syntax: {0!r}. how about some 'help'?".format(line)) - - def get_names(self): - return set(self.builtins) | set(self.amqp) - - def completenames(self, text, *ignored): - """Return all commands starting with `text`, for tab-completion.""" - names = self.get_names() - first = [cmd for cmd in names - if cmd.startswith(text.replace('_', '.'))] - if first: - return first - return [cmd for cmd in names - if cmd.partition('.')[2].startswith(text)] - - def dispatch(self, cmd, argline): - """Dispatch and execute the command. - - Lookup order is: :attr:`builtins` -> :attr:`amqp`. - - """ - arglist = shlex.split(safe_str(argline)) - if cmd in self.builtins: - return getattr(self, self.builtins[cmd])(*arglist) - fun, args, formatter = self.get_amqp_api_command(cmd, arglist) - return formatter(fun(*args)) - - def parseline(self, line): - """Parse input line. - - :returns: tuple of three items: - `(command_name, arglist, original_line)` - - """ - parts = line.split() - if parts: - return parts[0], ' '.join(parts[1:]), line - return '', '', line - - def onecmd(self, line): - """Parse line and execute command.""" - cmd, arg, line = self.parseline(line) - if not line: - return self.emptyline() - self.lastcmd = line - self.counter = next(self.inc_counter) - try: - self.respond(self.dispatch(cmd, arg)) - except (AttributeError, KeyError) as exc: - self.default(line) - except Exception as exc: - self.say(exc) - self.needs_reconnect = True - - def respond(self, retval): - """What to do with the return value of a command.""" - if retval is not None: - if isinstance(retval, string_t): - self.say(retval) - else: - self.say(pprint.pformat(retval)) - - def _reconnect(self): - """Re-establish connection to the AMQP server.""" - self.conn = self.connect(self.conn) - self._prepare_spec(self.conn) - self.chan = self.conn.default_channel - self.needs_reconnect = False - - @property - def prompt(self): - return self.prompt_fmt.format(self=self) - - -class AMQPAdmin(object): - """The celery :program:`celery amqp` utility.""" - Shell = AMQShell - - def __init__(self, *args, **kwargs): - self.app = kwargs['app'] - self.out = kwargs.setdefault('out', sys.stderr) - self.silent = kwargs.get('silent') - self.args = args - - def connect(self, conn=None): - if conn: - conn.close() - conn = self.app.connection() - self.note('-> connecting to {0}.'.format(conn.as_uri())) - conn.connect() - self.note('-> connected.') - return conn - - def run(self): - shell = self.Shell(connect=self.connect, out=self.out) - if self.args: - return shell.onecmd(' '.join(self.args)) - try: - return shell.cmdloop() - except KeyboardInterrupt: - self.note('(bibi)') - pass - - def note(self, m): - if not self.silent: - say(m, file=self.out) - - -class amqp(Command): - """AMQP Administration Shell. 
- - Also works for non-amqp transports (but not ones that - store declarations in memory). - - Examples:: - - celery amqp - start shell mode - celery amqp help - show list of commands - - celery amqp exchange.delete name - celery amqp queue.delete queue - celery amqp queue.delete queue yes yes - - """ - - def run(self, *args, **options): - options['app'] = self.app - return AMQPAdmin(*args, **options).run() - - -def main(): - amqp().execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/base.py b/thesisenv/lib/python3.6/site-packages/celery/bin/base.py deleted file mode 100644 index 9044b7b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/base.py +++ /dev/null @@ -1,668 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -.. _preload-options: - -Preload Options ---------------- - -These options are supported by all commands, -and usually parsed before command-specific arguments. - -.. cmdoption:: -A, --app - - app instance to use (e.g. module.attr_name) - -.. cmdoption:: -b, --broker - - url to broker. default is 'amqp://guest@localhost//' - -.. cmdoption:: --loader - - name of custom loader class to use. - -.. cmdoption:: --config - - Name of the configuration module - -.. _daemon-options: - -Daemon Options --------------- - -These options are supported by commands that can detach -into the background (daemon). They will be present -in any command that also has a `--detach` option. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: --pidfile - - Optional file used to store the process pid. - - The program will not start if this file already exists - and the pid is still alive. - -.. cmdoption:: --uid - - User id, or user name of the user to run as after detaching. - -.. cmdoption:: --gid - - Group id, or group name of the main group to change to after - detaching. - -.. cmdoption:: --umask - - Effective umask (in octal) of the process after detaching. Inherits - the umask of the parent process by default. - -.. cmdoption:: --workdir - - Optional directory to change to after detaching. - -.. cmdoption:: --executable - - Executable to use for the detached process. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import os -import random -import re -import sys -import warnings -import json - -from collections import defaultdict -from heapq import heappush -from inspect import getargspec -from optparse import OptionParser, IndentedHelpFormatter, make_option as Option -from pprint import pformat - -from celery import VERSION_BANNER, Celery, maybe_patch_concurrency -from celery import signals -from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning -from celery.five import items, string, string_t -from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE -from celery.utils import term -from celery.utils import text -from celery.utils import node_format, host_format -from celery.utils.imports import symbol_by_name, import_from_cwd - -try: - input = raw_input -except NameError: - pass - -# always enable DeprecationWarnings, so our users can see them. -for warning in (CDeprecationWarning, CPendingDeprecationWarning): - warnings.simplefilter('once', warning, 0) - -ARGV_DISABLED = """ -Unrecognized command-line arguments: {0} - -Try --help? 
-""" - -find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') -find_rst_ref = re.compile(r':\w+:`(.+?)`') - -__all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter', - 'Command', 'Option', 'daemon_options'] - - -class Error(Exception): - status = EX_FAILURE - - def __init__(self, reason, status=None): - self.reason = reason - self.status = status if status is not None else self.status - super(Error, self).__init__(reason, status) - - def __str__(self): - return self.reason - __unicode__ = __str__ - - -class UsageError(Error): - status = EX_USAGE - - -class Extensions(object): - - def __init__(self, namespace, register): - self.names = [] - self.namespace = namespace - self.register = register - - def add(self, cls, name): - heappush(self.names, name) - self.register(cls, name=name) - - def load(self): - try: - from pkg_resources import iter_entry_points - except ImportError: # pragma: no cover - return - - for ep in iter_entry_points(self.namespace): - sym = ':'.join([ep.module_name, ep.attrs[0]]) - try: - cls = symbol_by_name(sym) - except (ImportError, SyntaxError) as exc: - warnings.warn( - 'Cannot load extension {0!r}: {1!r}'.format(sym, exc)) - else: - self.add(cls, ep.name) - return self.names - - -class HelpFormatter(IndentedHelpFormatter): - - def format_epilog(self, epilog): - if epilog: - return '\n{0}\n\n'.format(epilog) - return '' - - def format_description(self, description): - return text.ensure_2lines(text.fill_paragraphs( - text.dedent(description), self.width)) - - -class Command(object): - """Base class for command-line applications. - - :keyword app: The current app. - :keyword get_app: Callable returning the current app if no app provided. - - """ - Error = Error - UsageError = UsageError - Parser = OptionParser - - #: Arg list used in help. - args = '' - - #: Application version. - version = VERSION_BANNER - - #: If false the parser will raise an exception if positional - #: args are provided. - supports_args = True - - #: List of options (without preload options). - option_list = () - - # module Rst documentation to parse help from (if any) - doc = None - - # Some programs (multi) does not want to load the app specified - # (Issue #1008). - respects_app_option = True - - #: List of options to parse before parsing other options. - preload_options = ( - Option('-A', '--app', default=None), - Option('-b', '--broker', default=None), - Option('--loader', default=None), - Option('--config', default=None), - Option('--workdir', default=None, dest='working_directory'), - Option('--no-color', '-C', action='store_true', default=None), - Option('--quiet', '-q', action='store_true'), - ) - - #: Enable if the application should support config from the cmdline. - enable_config_from_cmdline = False - - #: Default configuration namespace. - namespace = 'celery' - - #: Text to print at end of --help - epilog = None - - #: Text to print in --help before option list. - description = '' - - #: Set to true if this command doesn't have subcommands - leaf = True - - # used by :meth:`say_remote_command_reply`. - show_body = True - # used by :meth:`say_chat`. 
- show_reply = True - - prog_name = 'celery' - - def __init__(self, app=None, get_app=None, no_color=False, - stdout=None, stderr=None, quiet=False, on_error=None, - on_usage_error=None): - self.app = app - self.get_app = get_app or self._get_default_app - self.stdout = stdout or sys.stdout - self.stderr = stderr or sys.stderr - self._colored = None - self._no_color = no_color - self.quiet = quiet - if not self.description: - self.description = self.__doc__ - if on_error: - self.on_error = on_error - if on_usage_error: - self.on_usage_error = on_usage_error - - def run(self, *args, **options): - """This is the body of the command called by :meth:`handle_argv`.""" - raise NotImplementedError('subclass responsibility') - - def on_error(self, exc): - self.error(self.colored.red('Error: {0}'.format(exc))) - - def on_usage_error(self, exc): - self.handle_error(exc) - - def on_concurrency_setup(self): - pass - - def __call__(self, *args, **kwargs): - random.seed() # maybe we were forked. - self.verify_args(args) - try: - ret = self.run(*args, **kwargs) - return ret if ret is not None else EX_OK - except self.UsageError as exc: - self.on_usage_error(exc) - return exc.status - except self.Error as exc: - self.on_error(exc) - return exc.status - - def verify_args(self, given, _index=0): - S = getargspec(self.run) - _index = 1 if S.args and S.args[0] == 'self' else _index - required = S.args[_index:-len(S.defaults) if S.defaults else None] - missing = required[len(given):] - if missing: - raise self.UsageError('Missing required {0}: {1}'.format( - text.pluralize(len(missing), 'argument'), - ', '.join(missing) - )) - - def execute_from_commandline(self, argv=None): - """Execute application from command-line. - - :keyword argv: The list of command-line arguments. - Defaults to ``sys.argv``. - - """ - if argv is None: - argv = list(sys.argv) - # Should we load any special concurrency environment? - self.maybe_patch_concurrency(argv) - self.on_concurrency_setup() - - # Dump version and exit if '--version' arg set. - self.early_version(argv) - argv = self.setup_app_from_commandline(argv) - self.prog_name = os.path.basename(argv[0]) - return self.handle_argv(self.prog_name, argv[1:]) - - def run_from_argv(self, prog_name, argv=None, command=None): - return self.handle_argv(prog_name, - sys.argv if argv is None else argv, command) - - def maybe_patch_concurrency(self, argv=None): - argv = argv or sys.argv - pool_option = self.with_pool_option(argv) - if pool_option: - maybe_patch_concurrency(argv, *pool_option) - short_opts, long_opts = pool_option - - def usage(self, command): - return '%prog {0} [options] {self.args}'.format(command, self=self) - - def get_options(self): - """Get supported command-line options.""" - return self.option_list - - def expanduser(self, value): - if isinstance(value, string_t): - return os.path.expanduser(value) - return value - - def ask(self, q, choices, default=None): - """Prompt user to choose from a tuple of string values. - - :param q: the question to ask (do not include questionark) - :param choice: tuple of possible choices, must be lowercase. - :param default: Default value if any. - - If a default is not specified the question will be repeated - until the user gives a valid choice. - - Matching is done case insensitively. - - """ - schoices = choices - if default is not None: - schoices = [c.upper() if c == default else c.lower() - for c in choices] - schoices = '/'.join(schoices) - - p = '{0} ({1})? 
'.format(q.capitalize(), schoices) - while 1: - val = input(p).lower() - if val in choices: - return val - elif default is not None: - break - return default - - def handle_argv(self, prog_name, argv, command=None): - """Parse command-line arguments from ``argv`` and dispatch - to :meth:`run`. - - :param prog_name: The program name (``argv[0]``). - :param argv: Command arguments. - - Exits with an error message if :attr:`supports_args` is disabled - and ``argv`` contains positional arguments. - - """ - options, args = self.prepare_args( - *self.parse_options(prog_name, argv, command)) - return self(*args, **options) - - def prepare_args(self, options, args): - if options: - options = dict((k, self.expanduser(v)) - for k, v in items(vars(options)) - if not k.startswith('_')) - args = [self.expanduser(arg) for arg in args] - self.check_args(args) - return options, args - - def check_args(self, args): - if not self.supports_args and args: - self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE) - - def error(self, s): - self.out(s, fh=self.stderr) - - def out(self, s, fh=None): - print(s, file=fh or self.stdout) - - def die(self, msg, status=EX_FAILURE): - self.error(msg) - sys.exit(status) - - def early_version(self, argv): - if '--version' in argv: - print(self.version, file=self.stdout) - sys.exit(0) - - def parse_options(self, prog_name, arguments, command=None): - """Parse the available options.""" - # Don't want to load configuration to just print the version, - # so we handle --version manually here. - self.parser = self.create_parser(prog_name, command) - return self.parser.parse_args(arguments) - - def create_parser(self, prog_name, command=None): - option_list = ( - self.preload_options + - self.get_options() + - tuple(self.app.user_options['preload']) - ) - return self.prepare_parser(self.Parser( - prog=prog_name, - usage=self.usage(command), - version=self.version, - epilog=self.epilog, - formatter=HelpFormatter(), - description=self.description, - option_list=option_list, - )) - - def prepare_parser(self, parser): - docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc] - for doc in docs: - for long_opt, help in items(doc): - option = parser.get_option(long_opt) - if option is not None: - option.help = ' '.join(help).format(default=option.default) - return parser - - def setup_app_from_commandline(self, argv): - preload_options = self.parse_preload_options(argv) - quiet = preload_options.get('quiet') - if quiet is not None: - self.quiet = quiet - try: - self.no_color = preload_options['no_color'] - except KeyError: - pass - workdir = preload_options.get('working_directory') - if workdir: - os.chdir(workdir) - app = (preload_options.get('app') or - os.environ.get('CELERY_APP') or - self.app) - preload_loader = preload_options.get('loader') - if preload_loader: - # Default app takes loader from this env (Issue #1066). 
- os.environ['CELERY_LOADER'] = preload_loader - loader = (preload_loader, - os.environ.get('CELERY_LOADER') or - 'default') - broker = preload_options.get('broker', None) - if broker: - os.environ['CELERY_BROKER_URL'] = broker - config = preload_options.get('config') - if config: - os.environ['CELERY_CONFIG_MODULE'] = config - if self.respects_app_option: - if app: - self.app = self.find_app(app) - elif self.app is None: - self.app = self.get_app(loader=loader) - if self.enable_config_from_cmdline: - argv = self.process_cmdline_config(argv) - else: - self.app = Celery(fixups=[]) - - user_preload = tuple(self.app.user_options['preload'] or ()) - if user_preload: - user_options = self.preparse_options(argv, user_preload) - for user_option in user_preload: - user_options.setdefault(user_option.dest, user_option.default) - signals.user_preload_options.send( - sender=self, app=self.app, options=user_options, - ) - return argv - - def find_app(self, app): - from celery.app.utils import find_app - return find_app(app, symbol_by_name=self.symbol_by_name) - - def symbol_by_name(self, name, imp=import_from_cwd): - return symbol_by_name(name, imp=imp) - get_cls_by_name = symbol_by_name # XXX compat - - def process_cmdline_config(self, argv): - try: - cargs_start = argv.index('--') - except ValueError: - return argv - argv, cargs = argv[:cargs_start], argv[cargs_start + 1:] - self.app.config_from_cmdline(cargs, namespace=self.namespace) - return argv - - def parse_preload_options(self, args): - return self.preparse_options(args, self.preload_options) - - def add_append_opt(self, acc, opt, value): - acc.setdefault(opt.dest, opt.default or []) - acc[opt.dest].append(value) - - def preparse_options(self, args, options): - acc = {} - opts = {} - for opt in options: - for t in (opt._long_opts, opt._short_opts): - opts.update(dict(zip(t, [opt] * len(t)))) - index = 0 - length = len(args) - while index < length: - arg = args[index] - if arg.startswith('--'): - if '=' in arg: - key, value = arg.split('=', 1) - opt = opts.get(key) - if opt: - if opt.action == 'append': - self.add_append_opt(acc, opt, value) - else: - acc[opt.dest] = value - else: - opt = opts.get(arg) - if opt and opt.takes_value(): - # optparse also supports ['--opt', 'value'] - # (Issue #1668) - if opt.action == 'append': - self.add_append_opt(acc, opt, args[index + 1]) - else: - acc[opt.dest] = args[index + 1] - index += 1 - elif opt and opt.action == 'store_true': - acc[opt.dest] = True - elif arg.startswith('-'): - opt = opts.get(arg) - if opt: - if opt.takes_value(): - try: - acc[opt.dest] = args[index + 1] - except IndexError: - raise ValueError( - 'Missing required argument for {0}'.format( - arg)) - index += 1 - elif opt.action == 'store_true': - acc[opt.dest] = True - index += 1 - return acc - - def parse_doc(self, doc): - options, in_option = defaultdict(list), None - for line in doc.splitlines(): - if line.startswith('.. cmdoption::'): - m = find_long_opt.match(line) - if m: - in_option = m.groups()[0].strip() - assert in_option, 'missing long opt' - elif in_option and line.startswith(' ' * 4): - options[in_option].append( - find_rst_ref.sub(r'\1', line.strip()).replace('`', '')) - return options - - def with_pool_option(self, argv): - """Return tuple of ``(short_opts, long_opts)`` if the command - supports a pool argument, and used to monkey patch eventlet/gevent - environments as early as possible. 
- - E.g:: - has_pool_option = (['-P'], ['--pool']) - """ - pass - - def node_format(self, s, nodename, **extra): - return node_format(s, nodename, **extra) - - def host_format(self, s, **extra): - return host_format(s, **extra) - - def _get_default_app(self, *args, **kwargs): - from celery._state import get_current_app - return get_current_app() # omit proxy - - def pretty_list(self, n): - c = self.colored - if not n: - return '- empty -' - return '\n'.join( - str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n - ) - - def pretty_dict_ok_error(self, n): - c = self.colored - try: - return (c.green('OK'), - text.indent(self.pretty(n['ok'])[1], 4)) - except KeyError: - pass - return (c.red('ERROR'), - text.indent(self.pretty(n['error'])[1], 4)) - - def say_remote_command_reply(self, replies): - c = self.colored - node = next(iter(replies)) # <-- take first. - reply = replies[node] - status, preply = self.pretty(reply) - self.say_chat('->', c.cyan(node, ': ') + status, - text.indent(preply, 4) if self.show_reply else '') - - def pretty(self, n): - OK = str(self.colored.green('OK')) - if isinstance(n, list): - return OK, self.pretty_list(n) - if isinstance(n, dict): - if 'ok' in n or 'error' in n: - return self.pretty_dict_ok_error(n) - else: - return OK, json.dumps(n, sort_keys=True, indent=4) - if isinstance(n, string_t): - return OK, string(n) - return OK, pformat(n) - - def say_chat(self, direction, title, body=''): - c = self.colored - if direction == '<-' and self.quiet: - return - dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' - self.out(c.reset(dirstr, title)) - if body and self.show_body: - self.out(body) - - @property - def colored(self): - if self._colored is None: - self._colored = term.colored(enabled=not self.no_color) - return self._colored - - @colored.setter - def colored(self, obj): - self._colored = obj - - @property - def no_color(self): - return self._no_color - - @no_color.setter - def no_color(self, value): - self._no_color = value - if self._colored is not None: - self._colored.enabled = not self._no_color - - -def daemon_options(default_pidfile=None, default_logfile=None): - return ( - Option('-f', '--logfile', default=default_logfile), - Option('--pidfile', default=default_pidfile), - Option('--uid', default=None), - Option('--gid', default=None), - Option('--umask', default=None), - Option('--executable', default=None), - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py b/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py deleted file mode 100644 index 4bcbc62..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery beat` command. - -.. program:: celery beat - -.. seealso:: - - See :ref:`preload-options` and :ref:`daemon-options`. - -.. cmdoption:: --detach - - Detach and run in the background as a daemon. - -.. cmdoption:: -s, --schedule - - Path to the schedule database. Defaults to `celerybeat-schedule`. - The extension '.db' may be appended to the filename. - Default is {default}. - -.. cmdoption:: -S, --scheduler - - Scheduler class to use. - Default is :class:`celery.beat.PersistentScheduler`. - -.. cmdoption:: --max-interval - - Max seconds to sleep between schedule iterations. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. 
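What beat actually dispatches comes from the app configuration; a
minimal, illustrative schedule entry (task name assumed) is::

    from datetime import timedelta

    CELERYBEAT_SCHEDULE = {
        'add-every-30s': {
            'task': 'tasks.add',
            'schedule': timedelta(seconds=30),
            'args': (2, 2),
        },
    }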
- -""" -from __future__ import absolute_import - -from functools import partial - -from celery.platforms import detached, maybe_drop_privileges - -from celery.bin.base import Command, Option, daemon_options - -__all__ = ['beat'] - - -class beat(Command): - """Start the beat periodic task scheduler. - - Examples:: - - celery beat -l info - celery beat -s /var/run/celery/beat-schedule --detach - celery beat -S djcelery.schedulers.DatabaseScheduler - - """ - doc = __doc__ - enable_config_from_cmdline = True - supports_args = False - - def run(self, detach=False, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, **kwargs): - if not detach: - maybe_drop_privileges(uid=uid, gid=gid) - workdir = working_directory - kwargs.pop('app', None) - beat = partial(self.app.Beat, - logfile=logfile, pidfile=pidfile, **kwargs) - - if detach: - with detached(logfile, pidfile, uid, gid, umask, workdir): - return beat().run() - else: - return beat().run() - - def get_options(self): - c = self.app.conf - - return ( - (Option('--detach', action='store_true'), - Option('-s', '--schedule', - default=c.CELERYBEAT_SCHEDULE_FILENAME), - Option('--max-interval', type='float'), - Option('-S', '--scheduler', dest='scheduler_cls'), - Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL)) + - daemon_options(default_pidfile='celerybeat.pid') + - tuple(self.app.user_options['beat']) - ) - - -def main(app=None): - beat(app=app).execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py deleted file mode 100644 index 4676b30..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py +++ /dev/null @@ -1,850 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery` umbrella command. - -.. program:: celery - -""" -from __future__ import absolute_import, unicode_literals - -import anyjson -import numbers -import os -import sys - -from functools import partial -from importlib import import_module - -from celery.five import string_t, values -from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE -from celery.utils import term -from celery.utils import text -from celery.utils.timeutils import maybe_iso8601 - -# Cannot use relative imports here due to a Windows issue (#1111). -from celery.bin.base import Command, Option, Extensions - -# Import commands from other modules -from celery.bin.amqp import amqp -from celery.bin.beat import beat -from celery.bin.events import events -from celery.bin.graph import graph -from celery.bin.worker import worker - -__all__ = ['CeleryCommand', 'main'] - -HELP = """ ----- -- - - ---- Commands- -------------- --- ------------ - -{commands} ----- -- - - --------- -- - -------------- --- ------------ - -Type '{prog_name} --help' for help using a specific command. 
-""" - -MIGRATE_PROGRESS_FMT = """\ -Migrating task {state.count}/{state.strtotal}: \ -{body[task]}[{body[id]}]\ -""" - -DEBUG = os.environ.get('C_DEBUG', False) - -command_classes = [ - ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'), - ('Remote Control', ['status', 'inspect', 'control'], 'blue'), - ('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None), -] -if DEBUG: # pragma: no cover - command_classes.append( - ('Debug', ['graph'], 'red'), - ) - - -def determine_exit_status(ret): - if isinstance(ret, numbers.Integral): - return ret - return EX_OK if ret else EX_FAILURE - - -def main(argv=None): - # Fix for setuptools generated scripts, so that it will - # work with multiprocessing fork emulation. - # (see multiprocessing.forking.get_preparation_data()) - try: - if __name__ != '__main__': # pragma: no cover - sys.modules['__main__'] = sys.modules[__name__] - cmd = CeleryCommand() - cmd.maybe_patch_concurrency() - from billiard import freeze_support - freeze_support() - cmd.execute_from_commandline(argv) - except KeyboardInterrupt: - pass - - -class multi(Command): - """Start multiple worker instances.""" - respects_app_option = False - - def get_options(self): - return () - - def run_from_argv(self, prog_name, argv, command=None): - from celery.bin.multi import MultiTool - multi = MultiTool(quiet=self.quiet, no_color=self.no_color) - return multi.execute_from_commandline( - [command] + argv, prog_name, - ) - - -class list_(Command): - """Get info from broker. - - Examples:: - - celery list bindings - - NOTE: For RabbitMQ the management plugin is required. - """ - args = '[bindings]' - - def list_bindings(self, management): - try: - bindings = management.get_bindings() - except NotImplementedError: - raise self.Error('Your transport cannot list bindings.') - - def fmt(q, e, r): - return self.out('{0:<28} {1:<28} {2}'.format(q, e, r)) - fmt('Queue', 'Exchange', 'Routing Key') - fmt('-' * 16, '-' * 16, '-' * 16) - for b in bindings: - fmt(b['destination'], b['source'], b['routing_key']) - - def run(self, what=None, *_, **kw): - topics = {'bindings': self.list_bindings} - available = ', '.join(topics) - if not what: - raise self.UsageError( - 'You must specify one of {0}'.format(available)) - if what not in topics: - raise self.UsageError( - 'unknown topic {0!r} (choose one of: {1})'.format( - what, available)) - with self.app.connection() as conn: - self.app.amqp.TaskConsumer(conn).declare() - topics[what](conn.manager) - - -class call(Command): - """Call a task by name. - - Examples:: - - celery call tasks.add --args='[2, 2]' - celery call tasks.add --args='[2, 2]' --countdown=10 - """ - args = '' - option_list = Command.option_list + ( - Option('--args', '-a', help='positional arguments (json).'), - Option('--kwargs', '-k', help='keyword arguments (json).'), - Option('--eta', help='scheduled time (ISO-8601).'), - Option('--countdown', type='float', - help='eta in seconds from now (float/int).'), - Option('--expires', help='expiry time (ISO-8601/float/int).'), - Option('--serializer', default='json', help='defaults to json.'), - Option('--queue', help='custom queue name.'), - Option('--exchange', help='custom exchange name.'), - Option('--routing-key', help='custom routing key.'), - ) - - def run(self, name, *_, **kw): - # Positional args. - args = kw.get('args') or () - if isinstance(args, string_t): - args = anyjson.loads(args) - - # Keyword args. 
- kwargs = kw.get('kwargs') or {} - if isinstance(kwargs, string_t): - kwargs = anyjson.loads(kwargs) - - # Expires can be int/float. - expires = kw.get('expires') or None - try: - expires = float(expires) - except (TypeError, ValueError): - # or a string describing an ISO 8601 datetime. - try: - expires = maybe_iso8601(expires) - except (TypeError, ValueError): - raise - - res = self.app.send_task(name, args=args, kwargs=kwargs, - countdown=kw.get('countdown'), - serializer=kw.get('serializer'), - queue=kw.get('queue'), - exchange=kw.get('exchange'), - routing_key=kw.get('routing_key'), - eta=maybe_iso8601(kw.get('eta')), - expires=expires) - self.out(res.id) - - -class purge(Command): - """Erase all messages from all known task queues. - - WARNING: There is no undo operation for this command. - - """ - warn_prelude = ( - '{warning}: This will remove all tasks from {queues}: {names}.\n' - ' There is no undo for this operation!\n\n' - '(to skip this prompt use the -f option)\n' - ) - warn_prompt = 'Are you sure you want to delete all tasks' - fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' - fmt_empty = 'No messages purged from {qnum} {queues}' - option_list = Command.option_list + ( - Option('--force', '-f', action='store_true', - help='Do not prompt for verification'), - ) - - def run(self, force=False, **kwargs): - names = list(sorted(self.app.amqp.queues.keys())) - qnum = len(names) - if not force: - self.out(self.warn_prelude.format( - warning=self.colored.red('WARNING'), - queues=text.pluralize(qnum, 'queue'), names=', '.join(names), - )) - if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes': - return - messages = self.app.control.purge() - fmt = self.fmt_purged if messages else self.fmt_empty - self.out(fmt.format( - mnum=messages, qnum=qnum, - messages=text.pluralize(messages, 'message'), - queues=text.pluralize(qnum, 'queue'))) - - -class result(Command): - """Gives the return value for a given task id. - - Examples:: - - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add - celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback - - """ - args = '' - option_list = Command.option_list + ( - Option('--task', '-t', help='name of task (if custom backend)'), - Option('--traceback', action='store_true', - help='show traceback instead'), - ) - - def run(self, task_id, *args, **kwargs): - result_cls = self.app.AsyncResult - task = kwargs.get('task') - traceback = kwargs.get('traceback', False) - - if task: - result_cls = self.app.tasks[task].AsyncResult - result = result_cls(task_id) - if traceback: - value = result.traceback - else: - value = result.get() - self.out(self.pretty(value)[1]) - - -class _RemoteControl(Command): - name = None - choices = None - leaf = False - option_list = Command.option_list + ( - Option('--timeout', '-t', type='float', - help='Timeout in seconds (float) waiting for reply'), - Option('--destination', '-d', - help='Comma separated list of destination node names.')) - - def __init__(self, *args, **kwargs): - self.show_body = kwargs.pop('show_body', True) - self.show_reply = kwargs.pop('show_reply', True) - super(_RemoteControl, self).__init__(*args, **kwargs) - - @classmethod - def get_command_info(self, command, - indent=0, prefix='', color=None, help=False): - if help: - help = '|' + text.indent(self.choices[command][1], indent + 4) - else: - help = None - try: - # see if it uses args. 
- meth = getattr(self, command) - return text.join([ - '|' + text.indent('{0}{1} {2}'.format( - prefix, color(command), meth.__doc__), indent), - help, - ]) - - except AttributeError: - return text.join([ - '|' + text.indent(prefix + str(color(command)), indent), help, - ]) - - @classmethod - def list_commands(self, indent=0, prefix='', color=None, help=False): - color = color if color else lambda x: x - prefix = prefix + ' ' if prefix else '' - return '\n'.join(self.get_command_info(c, indent, prefix, color, help) - for c in sorted(self.choices)) - - @property - def epilog(self): - return '\n'.join([ - '[Commands]', - self.list_commands(indent=4, help=True) - ]) - - def usage(self, command): - return '%prog {0} [options] {1} [arg1 .. argN]'.format( - command, self.args) - - def call(self, *args, **kwargs): - raise NotImplementedError('call') - - def run(self, *args, **kwargs): - if not args: - raise self.UsageError( - 'Missing {0.name} method. See --help'.format(self)) - return self.do_call_method(args, **kwargs) - - def do_call_method(self, args, **kwargs): - method = args[0] - if method == 'help': - raise self.Error("Did you mean '{0.name} --help'?".format(self)) - if method not in self.choices: - raise self.UsageError( - 'Unknown {0.name} method {1}'.format(self, method)) - - if self.app.connection().transport.driver_type == 'sql': - raise self.Error('Broadcast not supported by SQL broker transport') - - destination = kwargs.get('destination') - timeout = kwargs.get('timeout') or self.choices[method][0] - if destination and isinstance(destination, string_t): - destination = [dest.strip() for dest in destination.split(',')] - - handler = getattr(self, method, self.call) - - replies = handler(method, *args[1:], timeout=timeout, - destination=destination, - callback=self.say_remote_command_reply) - if not replies: - raise self.Error('No nodes replied within time constraint.', - status=EX_UNAVAILABLE) - return replies - - -class inspect(_RemoteControl): - """Inspect the worker at runtime. - - Availability: RabbitMQ (amqp), Redis, and MongoDB transports. - - Examples:: - - celery inspect active --timeout=5 - celery inspect scheduled -d worker1@example.com - celery inspect revoked -d w1@e.com,w2@e.com - - """ - name = 'inspect' - choices = { - 'active': (1.0, 'dump active tasks (being processed)'), - 'active_queues': (1.0, 'dump queues being consumed from'), - 'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'), - 'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'), - 'stats': (1.0, 'dump worker statistics'), - 'revoked': (1.0, 'dump of revoked task ids'), - 'registered': (1.0, 'dump of registered tasks'), - 'ping': (0.2, 'ping worker(s)'), - 'clock': (1.0, 'get value of logical clock'), - 'conf': (1.0, 'dump worker configuration'), - 'report': (1.0, 'get bugreport info'), - 'memsample': (1.0, 'sample memory (requires psutil)'), - 'memdump': (1.0, 'dump memory samples (requires psutil)'), - 'objgraph': (60.0, 'create object graph (requires objgraph)'), - } - - def call(self, method, *args, **options): - i = self.app.control.inspect(**options) - return getattr(i, method)(*args) - - def objgraph(self, type_='Request', *args, **kwargs): - return self.call('objgraph', type_, **kwargs) - - def conf(self, with_defaults=False, *args, **kwargs): - return self.call('conf', with_defaults, **kwargs) - - -class control(_RemoteControl): - """Workers remote control. - - Availability: RabbitMQ (amqp), Redis, and MongoDB transports. 
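    Like ``inspect``, this is a thin layer over ``app.control``; the same
    calls can be made programmatically, e.g. (illustrative)::

        app.control.rate_limit('tasks.add', '10/m', reply=True)
        app.control.pool_grow(2, destination=['w1@example.com'], reply=True)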
- - Examples:: - - celery control enable_events --timeout=5 - celery control -d worker1@example.com enable_events - celery control -d w1.e.com,w2.e.com enable_events - - celery control -d w1.e.com add_consumer queue_name - celery control -d w1.e.com cancel_consumer queue_name - - celery control -d w1.e.com add_consumer queue exchange direct rkey - - """ - name = 'control' - choices = { - 'enable_events': (1.0, 'tell worker(s) to enable events'), - 'disable_events': (1.0, 'tell worker(s) to disable events'), - 'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'), - 'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'), - 'rate_limit': ( - 1.0, 'tell worker(s) to modify the rate limit for a task type'), - 'time_limit': ( - 1.0, 'tell worker(s) to modify the time limit for a task type.'), - 'autoscale': (1.0, 'change autoscale settings'), - 'pool_grow': (1.0, 'start more pool processes'), - 'pool_shrink': (1.0, 'use less pool processes'), - } - - def call(self, method, *args, **options): - return getattr(self.app.control, method)(*args, reply=True, **options) - - def pool_grow(self, method, n=1, **kwargs): - """[N=1]""" - return self.call(method, int(n), **kwargs) - - def pool_shrink(self, method, n=1, **kwargs): - """[N=1]""" - return self.call(method, int(n), **kwargs) - - def autoscale(self, method, max=None, min=None, **kwargs): - """[max] [min]""" - return self.call(method, int(max), int(min), **kwargs) - - def rate_limit(self, method, task_name, rate_limit, **kwargs): - """ (e.g. 5/s | 5/m | 5/h)>""" - return self.call(method, task_name, rate_limit, **kwargs) - - def time_limit(self, method, task_name, soft, hard=None, **kwargs): - """ [hard_secs]""" - return self.call(method, task_name, - float(soft), float(hard), **kwargs) - - def add_consumer(self, method, queue, exchange=None, - exchange_type='direct', routing_key=None, **kwargs): - """ [exchange [type [routing_key]]]""" - return self.call(method, queue, exchange, - exchange_type, routing_key, **kwargs) - - def cancel_consumer(self, method, queue, **kwargs): - """""" - return self.call(method, queue, **kwargs) - - -class status(Command): - """Show list of workers that are online.""" - option_list = inspect.option_list - - def run(self, *args, **kwargs): - I = inspect( - app=self.app, - no_color=kwargs.get('no_color', False), - stdout=self.stdout, stderr=self.stderr, - show_reply=False, show_body=False, quiet=True, - ) - replies = I.run('ping', **kwargs) - if not replies: - raise self.Error('No nodes replied within time constraint', - status=EX_UNAVAILABLE) - nodecount = len(replies) - if not kwargs.get('quiet', False): - self.out('\n{0} {1} online.'.format( - nodecount, text.pluralize(nodecount, 'node'))) - - -class migrate(Command): - """Migrate tasks from one broker to another. - - Examples:: - - celery migrate redis://localhost amqp://guest@localhost// - celery migrate django:// redis://localhost - - NOTE: This command is experimental, make sure you have - a backup of the tasks before you continue. 
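    Programmatically this wraps :func:`celery.contrib.migrate.migrate_tasks`,
    roughly::

        from kombu import Connection
        from celery.contrib.migrate import migrate_tasks

        migrate_tasks(Connection('redis://localhost'),
                      Connection('amqp://guest@localhost//'))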
- """ - args = ' ' - option_list = Command.option_list + ( - Option('--limit', '-n', type='int', - help='Number of tasks to consume (int)'), - Option('--timeout', '-t', type='float', default=1.0, - help='Timeout in seconds (float) waiting for tasks'), - Option('--ack-messages', '-a', action='store_true', - help='Ack messages from source broker.'), - Option('--tasks', '-T', - help='List of task names to filter on.'), - Option('--queues', '-Q', - help='List of queues to migrate.'), - Option('--forever', '-F', action='store_true', - help='Continually migrate tasks until killed.'), - ) - progress_fmt = MIGRATE_PROGRESS_FMT - - def on_migrate_task(self, state, body, message): - self.out(self.progress_fmt.format(state=state, body=body)) - - def run(self, source, destination, **kwargs): - from kombu import Connection - from celery.contrib.migrate import migrate_tasks - - migrate_tasks(Connection(source), - Connection(destination), - callback=self.on_migrate_task, - **kwargs) - - -class shell(Command): # pragma: no cover - """Start shell session with convenient access to celery symbols. - - The following symbols will be added to the main globals: - - - celery: the current application. - - chord, group, chain, chunks, - xmap, xstarmap subtask, Task - - all registered tasks. - - """ - option_list = Command.option_list + ( - Option('--ipython', '-I', - action='store_true', dest='force_ipython', - help='force iPython.'), - Option('--bpython', '-B', - action='store_true', dest='force_bpython', - help='force bpython.'), - Option('--python', '-P', - action='store_true', dest='force_python', - help='force default Python shell.'), - Option('--without-tasks', '-T', action='store_true', - help="don't add tasks to locals."), - Option('--eventlet', action='store_true', - help='use eventlet.'), - Option('--gevent', action='store_true', help='use gevent.'), - ) - - def run(self, force_ipython=False, force_bpython=False, - force_python=False, without_tasks=False, eventlet=False, - gevent=False, **kwargs): - sys.path.insert(0, os.getcwd()) - if eventlet: - import_module('celery.concurrency.eventlet') - if gevent: - import_module('celery.concurrency.gevent') - import celery - import celery.task.base - self.app.loader.import_default_modules() - self.locals = {'app': self.app, - 'celery': self.app, - 'Task': celery.Task, - 'chord': celery.chord, - 'group': celery.group, - 'chain': celery.chain, - 'chunks': celery.chunks, - 'xmap': celery.xmap, - 'xstarmap': celery.xstarmap, - 'subtask': celery.subtask, - 'signature': celery.signature} - - if not without_tasks: - self.locals.update(dict( - (task.__name__, task) for task in values(self.app.tasks) - if not task.name.startswith('celery.')), - ) - - if force_python: - return self.invoke_fallback_shell() - elif force_bpython: - return self.invoke_bpython_shell() - elif force_ipython: - return self.invoke_ipython_shell() - return self.invoke_default_shell() - - def invoke_default_shell(self): - try: - import IPython # noqa - except ImportError: - try: - import bpython # noqa - except ImportError: - return self.invoke_fallback_shell() - else: - return self.invoke_bpython_shell() - else: - return self.invoke_ipython_shell() - - def invoke_fallback_shell(self): - import code - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.set_completer( - rlcompleter.Completer(self.locals).complete) - readline.parse_and_bind('tab:complete') - code.interact(local=self.locals) - - def invoke_ipython_shell(self): - for ip in (self._ipython, 
self._ipython_pre_10, - self._ipython_terminal, self._ipython_010, - self._no_ipython): - try: - return ip() - except ImportError: - pass - - def _ipython(self): - from IPython import start_ipython - start_ipython(argv=[], user_ns=self.locals) - - def _ipython_pre_10(self): # pragma: no cover - from IPython.frontend.terminal.ipapp import TerminalIPythonApp - app = TerminalIPythonApp.instance() - app.initialize(argv=[]) - app.shell.user_ns.update(self.locals) - app.start() - - def _ipython_terminal(self): # pragma: no cover - from IPython.terminal import embed - embed.TerminalInteractiveShell(user_ns=self.locals).mainloop() - - def _ipython_010(self): # pragma: no cover - from IPython.Shell import IPShell - IPShell(argv=[], user_ns=self.locals).mainloop() - - def _no_ipython(self): # pragma: no cover - raise ImportError("no suitable ipython found") - - def invoke_bpython_shell(self): - import bpython - bpython.embed(self.locals) - - -class help(Command): - """Show help screen and exit.""" - - def usage(self, command): - return '%prog [options] {0.args}'.format(self) - - def run(self, *args, **kwargs): - self.parser.print_help() - self.out(HELP.format( - prog_name=self.prog_name, - commands=CeleryCommand.list_commands(colored=self.colored), - )) - - return EX_USAGE - - -class report(Command): - """Shows information useful to include in bugreports.""" - - def run(self, *args, **kwargs): - self.out(self.app.bugreport()) - return EX_OK - - -class CeleryCommand(Command): - namespace = 'celery' - ext_fmt = '{self.namespace}.commands' - commands = { - 'amqp': amqp, - 'beat': beat, - 'call': call, - 'control': control, - 'events': events, - 'graph': graph, - 'help': help, - 'inspect': inspect, - 'list': list_, - 'migrate': migrate, - 'multi': multi, - 'purge': purge, - 'report': report, - 'result': result, - 'shell': shell, - 'status': status, - 'worker': worker, - - } - enable_config_from_cmdline = True - prog_name = 'celery' - - @classmethod - def register_command(cls, fun, name=None): - cls.commands[name or fun.__name__] = fun - return fun - - def execute(self, command, argv=None): - try: - cls = self.commands[command] - except KeyError: - cls, argv = self.commands['help'], ['help'] - cls = self.commands.get(command) or self.commands['help'] - try: - return cls( - app=self.app, on_error=self.on_error, - no_color=self.no_color, quiet=self.quiet, - on_usage_error=partial(self.on_usage_error, command=command), - ).run_from_argv(self.prog_name, argv[1:], command=argv[0]) - except self.UsageError as exc: - self.on_usage_error(exc) - return exc.status - except self.Error as exc: - self.on_error(exc) - return exc.status - - def on_usage_error(self, exc, command=None): - if command: - helps = '{self.prog_name} {command} --help' - else: - helps = '{self.prog_name} --help' - self.error(self.colored.magenta('Error: {0}'.format(exc))) - self.error("""Please try '{0}'""".format(helps.format( - self=self, command=command, - ))) - - def _relocate_args_from_start(self, argv, index=0): - if argv: - rest = [] - while index < len(argv): - value = argv[index] - if value.startswith('--'): - rest.append(value) - elif value.startswith('-'): - # we eat the next argument even though we don't know - # if this option takes an argument or not. - # instead we will assume what is the command name in the - # return statements below. 
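            # (Illustrative trace: for `celery -A proj worker -l info` the
            #  leading '-A proj' pair is collected into `rest`, the scan
            #  stops at 'worker', and the method returns
            #  ['worker', '-l', 'info', '-A', 'proj']: command name first,
            #  relocated options last.)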
- try: - nxt = argv[index + 1] - if nxt.startswith('-'): - # is another option - rest.append(value) - else: - # is (maybe) a value for this option - rest.extend([value, nxt]) - index += 1 - except IndexError: - rest.append(value) - break - else: - break - index += 1 - if argv[index:]: - # if there are more arguments left then divide and swap - # we assume the first argument in argv[i:] is the command - # name. - return argv[index:] + rest - # if there are no more arguments then the last arg in rest' - # must be the command. - [rest.pop()] + rest - return [] - - def prepare_prog_name(self, name): - if name == '__main__.py': - return sys.modules['__main__'].__file__ - return name - - def handle_argv(self, prog_name, argv): - self.prog_name = self.prepare_prog_name(prog_name) - argv = self._relocate_args_from_start(argv) - _, argv = self.prepare_args(None, argv) - try: - command = argv[0] - except IndexError: - command, argv = 'help', ['help'] - return self.execute(command, argv) - - def execute_from_commandline(self, argv=None): - argv = sys.argv if argv is None else argv - if 'multi' in argv[1:3]: # Issue 1008 - self.respects_app_option = False - try: - sys.exit(determine_exit_status( - super(CeleryCommand, self).execute_from_commandline(argv))) - except KeyboardInterrupt: - sys.exit(EX_FAILURE) - - @classmethod - def get_command_info(self, command, indent=0, color=None, colored=None): - colored = term.colored() if colored is None else colored - colored = colored.names[color] if color else lambda x: x - obj = self.commands[command] - cmd = 'celery {0}'.format(colored(command)) - if obj.leaf: - return '|' + text.indent(cmd, indent) - return text.join([ - ' ', - '|' + text.indent('{0} --help'.format(cmd), indent), - obj.list_commands(indent, 'celery {0}'.format(command), colored), - ]) - - @classmethod - def list_commands(self, indent=0, colored=None): - colored = term.colored() if colored is None else colored - white = colored.white - ret = [] - for cls, commands, color in command_classes: - ret.extend([ - text.indent('+ {0}: '.format(white(cls)), indent), - '\n'.join( - self.get_command_info(command, indent + 4, color, colored) - for command in commands), - '' - ]) - return '\n'.join(ret).strip() - - def with_pool_option(self, argv): - if len(argv) > 1 and 'worker' in argv[0:3]: - # this command supports custom pools - # that may have to be loaded as early as possible. 
- return (['-P'], ['--pool']) - - def on_concurrency_setup(self): - self.load_extension_commands() - - def load_extension_commands(self): - names = Extensions(self.ext_fmt.format(self=self), - self.register_command).load() - if names: - command_classes.append(('Extensions', names, 'magenta')) - - -def command(*args, **kwargs): - """Deprecated: Use classmethod :meth:`CeleryCommand.register_command` - instead.""" - _register = CeleryCommand.register_command - return _register(args[0]) if args else _register - - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py deleted file mode 100644 index 4d37d5f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.bin.celeryd_detach - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Program used to daemonize the worker - - Using :func:`os.execv` because forking and multiprocessing - leads to weird issues (it was a long time ago now, but it - could have something to do with the threading mutex bug) - -""" -from __future__ import absolute_import - -import celery -import os -import sys - -from optparse import OptionParser, BadOptionError - -from celery.platforms import EX_FAILURE, detached -from celery.utils import default_nodename, node_format -from celery.utils.log import get_logger - -from celery.bin.base import daemon_options, Option - -__all__ = ['detached_celeryd', 'detach'] - -logger = get_logger(__name__) - -C_FAKEFORK = os.environ.get('C_FAKEFORK') - -OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( - Option('--workdir', default=None, dest='working_directory'), - Option('-n', '--hostname'), - Option('--fake', - default=False, action='store_true', dest='fake', - help="Don't fork (for debugging purposes)"), -) - - -def detach(path, argv, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, fake=False, app=None, - executable=None, hostname=None): - hostname = default_nodename(hostname) - logfile = node_format(logfile, hostname) - pidfile = node_format(pidfile, hostname) - fake = 1 if C_FAKEFORK else fake - with detached(logfile, pidfile, uid, gid, umask, working_directory, fake, - after_forkers=False): - try: - if executable is not None: - path = executable - os.execv(path, [path] + argv) - except Exception: - if app is None: - from celery import current_app - app = current_app - app.log.setup_logging_subsystem( - 'ERROR', logfile, hostname=hostname) - logger.critical("Can't exec %r", ' '.join([path] + argv), - exc_info=True) - return EX_FAILURE - - -class PartialOptionParser(OptionParser): - - def __init__(self, *args, **kwargs): - self.leftovers = [] - OptionParser.__init__(self, *args, **kwargs) - - def _process_long_opt(self, rargs, values): - arg = rargs.pop(0) - - if '=' in arg: - opt, next_arg = arg.split('=', 1) - rargs.insert(0, next_arg) - had_explicit_value = True - else: - opt = arg - had_explicit_value = False - - try: - opt = self._match_long_opt(opt) - option = self._long_opt.get(opt) - except BadOptionError: - option = None - - if option: - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error('{0} requires an argument'.format(opt)) - else: - self.error('{0} requires {1} arguments'.format( - opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - 
elif had_explicit_value: - self.error('{0} option does not take a value'.format(opt)) - else: - value = None - option.process(opt, value, values, self) - else: - self.leftovers.append(arg) - - def _process_short_opts(self, rargs, values): - arg = rargs[0] - try: - OptionParser._process_short_opts(self, rargs, values) - except BadOptionError: - self.leftovers.append(arg) - if rargs and not rargs[0][0] == '-': - self.leftovers.append(rargs.pop(0)) - - -class detached_celeryd(object): - option_list = OPTION_LIST - usage = '%prog [options] [celeryd options]' - version = celery.VERSION_BANNER - description = ('Detaches Celery worker nodes. See `celery worker --help` ' - 'for the list of supported worker arguments.') - command = sys.executable - execv_path = sys.executable - if sys.version_info < (2, 7): # does not support pkg/__main__.py - execv_argv = ['-m', 'celery.__main__', 'worker'] - else: - execv_argv = ['-m', 'celery', 'worker'] - - def __init__(self, app=None): - self.app = app - - def Parser(self, prog_name): - return PartialOptionParser(prog=prog_name, - option_list=self.option_list, - usage=self.usage, - description=self.description, - version=self.version) - - def parse_options(self, prog_name, argv): - parser = self.Parser(prog_name) - options, values = parser.parse_args(argv) - if options.logfile: - parser.leftovers.append('--logfile={0}'.format(options.logfile)) - if options.pidfile: - parser.leftovers.append('--pidfile={0}'.format(options.pidfile)) - if options.hostname: - parser.leftovers.append('--hostname={0}'.format(options.hostname)) - return options, values, parser.leftovers - - def execute_from_commandline(self, argv=None): - if argv is None: - argv = sys.argv - config = [] - seen_cargs = 0 - for arg in argv: - if seen_cargs: - config.append(arg) - else: - if arg == '--': - seen_cargs = 1 - config.append(arg) - prog_name = os.path.basename(argv[0]) - options, values, leftovers = self.parse_options(prog_name, argv[1:]) - sys.exit(detach( - app=self.app, path=self.execv_path, - argv=self.execv_argv + leftovers + config, - **vars(options) - )) - - -def main(app=None): - detached_celeryd(app).execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/events.py b/thesisenv/lib/python3.6/site-packages/celery/bin/events.py deleted file mode 100644 index 8cc61b6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/events.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery events` command. - -.. program:: celery events - -.. seealso:: - - See :ref:`preload-options` and :ref:`daemon-options`. - -.. cmdoption:: -d, --dump - - Dump events to stdout. - -.. cmdoption:: -c, --camera - - Take snapshots of events using this camera. - -.. cmdoption:: --detach - - Camera: Detach and run in the background as a daemon. - -.. cmdoption:: -F, --freq, --frequency - - Camera: Shutter frequency. Default is every 1.0 seconds. - -.. cmdoption:: -r, --maxrate - - Camera: Optional shutter rate limit (e.g. 10/m). - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. Default is INFO. 
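A camera is a :class:`~celery.events.snapshot.Polaroid` subclass whose
``on_shutter`` receives the cluster state; a minimal sketch (module path
assumed)::

    from celery.events.snapshot import Polaroid

    class Camera(Polaroid):
        def on_shutter(self, state):
            print(state.event_count, state.task_count)

started as ``celery events -c myproj.Camera -F 2.0``.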
- -""" -from __future__ import absolute_import, unicode_literals - -import sys - -from functools import partial - -from celery.platforms import detached, set_process_title, strargv -from celery.bin.base import Command, Option, daemon_options - -__all__ = ['events'] - - -class events(Command): - """Event-stream utilities. - - Commands:: - - celery events --app=proj - start graphical monitor (requires curses) - celery events -d --app=proj - dump events to screen. - celery events -b amqp:// - celery events -c [options] - run snapshot camera. - - Examples:: - - celery events - celery events -d - celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info - """ - doc = __doc__ - supports_args = False - - def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, - loglevel='INFO', logfile=None, prog_name='celery events', - pidfile=None, uid=None, gid=None, umask=None, - working_directory=None, detach=False, **kwargs): - self.prog_name = prog_name - - if dump: - return self.run_evdump() - if camera: - return self.run_evcam(camera, freq=frequency, maxrate=maxrate, - loglevel=loglevel, logfile=logfile, - pidfile=pidfile, uid=uid, gid=gid, - umask=umask, - working_directory=working_directory, - detach=detach) - return self.run_evtop() - - def run_evdump(self): - from celery.events.dumper import evdump - self.set_process_status('dump') - return evdump(app=self.app) - - def run_evtop(self): - from celery.events.cursesmon import evtop - self.set_process_status('top') - return evtop(app=self.app) - - def run_evcam(self, camera, logfile=None, pidfile=None, uid=None, - gid=None, umask=None, working_directory=None, - detach=False, **kwargs): - from celery.events.snapshot import evcam - workdir = working_directory - self.set_process_status('cam') - kwargs['app'] = self.app - cam = partial(evcam, camera, - logfile=logfile, pidfile=pidfile, **kwargs) - - if detach: - with detached(logfile, pidfile, uid, gid, umask, workdir): - return cam() - else: - return cam() - - def set_process_status(self, prog, info=''): - prog = '{0}:{1}'.format(self.prog_name, prog) - info = '{0} {1}'.format(info, strargv(sys.argv)) - return set_process_title(prog, info=info) - - def get_options(self): - return ( - (Option('-d', '--dump', action='store_true'), - Option('-c', '--camera'), - Option('--detach', action='store_true'), - Option('-F', '--frequency', '--freq', - type='float', default=1.0), - Option('-r', '--maxrate'), - Option('-l', '--loglevel', default='INFO')) + - daemon_options(default_pidfile='celeryev.pid') + - tuple(self.app.user_options['events']) - ) - - -def main(): - ev = events() - ev.execute_from_commandline() - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py b/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py deleted file mode 100644 index 5d58476..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery graph` command. - -.. program:: celery graph - -""" -from __future__ import absolute_import, unicode_literals - -from operator import itemgetter - -from celery.datastructures import DependencyGraph, GraphFormatter -from celery.five import items - -from .base import Command - -__all__ = ['graph'] - - -class graph(Command): - args = """ [arguments] - ..... bootsteps [worker] [consumer] - ..... 
workers [enumerate] - """ - - def run(self, what=None, *args, **kwargs): - map = {'bootsteps': self.bootsteps, 'workers': self.workers} - if not what: - raise self.UsageError('missing type') - elif what not in map: - raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map))) - return map[what](*args, **kwargs) - - def bootsteps(self, *args, **kwargs): - worker = self.app.WorkController() - include = set(arg.lower() for arg in args or ['worker', 'consumer']) - if 'worker' in include: - graph = worker.blueprint.graph - if 'consumer' in include: - worker.blueprint.connect_with(worker.consumer.blueprint) - else: - graph = worker.consumer.blueprint.graph - graph.to_dot(self.stdout) - - def workers(self, *args, **kwargs): - - def simplearg(arg): - return maybe_list(itemgetter(0, 2)(arg.partition(':'))) - - def maybe_list(l, sep=','): - return (l[0], l[1].split(sep) if sep in l[1] else l[1]) - - args = dict(simplearg(arg) for arg in args) - generic = 'generic' in args - - def generic_label(node): - return '{0} ({1}://)'.format(type(node).__name__, - node._label.split('://')[0]) - - class Node(object): - force_label = None - scheme = {} - - def __init__(self, label, pos=None): - self._label = label - self.pos = pos - - def label(self): - return self._label - - def __str__(self): - return self.label() - - class Thread(Node): - scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', - 'shape': 'oval', 'fontsize': 10, 'width': 0.3, - 'color': 'black'} - - def __init__(self, label, **kwargs): - self._label = 'thr-{0}'.format(next(tids)) - self.real_label = label - self.pos = 0 - - class Formatter(GraphFormatter): - - def label(self, obj): - return obj and obj.label() - - def node(self, obj): - scheme = dict(obj.scheme) if obj.pos else obj.scheme - if isinstance(obj, Thread): - scheme['label'] = obj.real_label - return self.draw_node( - obj, dict(self.node_scheme, **scheme), - ) - - def terminal_node(self, obj): - return self.draw_node( - obj, dict(self.term_scheme, **obj.scheme), - ) - - def edge(self, a, b, **attrs): - if isinstance(a, Thread): - attrs.update(arrowhead='none', arrowtail='tee') - return self.draw_edge(a, b, self.edge_scheme, attrs) - - def subscript(n): - S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄', - '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} - return ''.join([S[i] for i in str(n)]) - - class Worker(Node): - pass - - class Backend(Node): - scheme = {'shape': 'folder', 'width': 2, - 'height': 1, 'color': 'black', - 'fillcolor': 'peachpuff3', 'color': 'peachpuff4'} - - def label(self): - return generic_label(self) if generic else self._label - - class Broker(Node): - scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3', - 'color': 'cadetblue4', 'height': 1} - - def label(self): - return generic_label(self) if generic else self._label - - from itertools import count - tids = count(1) - Wmax = int(args.get('wmax', 4) or 0) - Tmax = int(args.get('tmax', 3) or 0) - - def maybe_abbr(l, name, max=Wmax): - size = len(l) - abbr = max and size > max - if 'enumerate' in args: - l = ['{0}{1}'.format(name, subscript(i + 1)) - for i, obj in enumerate(l)] - if abbr: - l = l[0:max - 1] + [l[size - 1]] - l[max - 2] = '{0}⎨…{1}⎬'.format( - name[0], subscript(size - (max - 1))) - return l - - try: - workers = args['nodes'] - threads = args.get('threads') or [] - except KeyError: - replies = self.app.control.inspect().stats() - workers, threads = [], [] - for worker, reply in items(replies): - workers.append(worker) - threads.append(reply['pool']['max-concurrency']) - - 
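        # (Illustrative usage: both subcommands emit Graphviz dot on
        #  stdout, so a typical pipeline is
        #      celery graph bootsteps worker consumer | dot -Tpng -o steps.png
        #      celery graph workers enumerate | dot -Tpng -o workers.png )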
wlen = len(workers) - backend = args.get('backend', self.app.conf.CELERY_RESULT_BACKEND) - threads_for = {} - workers = maybe_abbr(workers, 'Worker') - if Wmax and wlen > Wmax: - threads = threads[0:3] + [threads[-1]] - for i, threads in enumerate(threads): - threads_for[workers[i]] = maybe_abbr( - list(range(int(threads))), 'P', Tmax, - ) - - broker = Broker(args.get('broker', self.app.connection().as_uri())) - backend = Backend(backend) if backend else None - graph = DependencyGraph(formatter=Formatter()) - graph.add_arc(broker) - if backend: - graph.add_arc(backend) - curworker = [0] - for i, worker in enumerate(workers): - worker = Worker(worker, pos=i) - graph.add_arc(worker) - graph.add_edge(worker, broker) - if backend: - graph.add_edge(worker, backend) - threads = threads_for.get(worker._label) - if threads: - for thread in threads: - thread = Thread(thread) - graph.add_arc(thread) - graph.add_edge(thread, worker) - - curworker[0] += 1 - - graph.to_dot(self.stdout) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py b/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py deleted file mode 100644 index f30aa9e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py +++ /dev/null @@ -1,646 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -.. program:: celery multi - -Examples -======== - -.. code-block:: bash - - # Single worker with explicit name and events enabled. - $ celery multi start Leslie -E - - # Pidfiles and logfiles are stored in the current directory - # by default. Use --pidfile and --logfile argument to change - # this. The abbreviation %N will be expanded to the current - # node name. - $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid - --logfile=/var/log/celery/%N.log - - - # You need to add the same arguments when you restart, - # as these are not persisted anywhere. - $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid - --logfile=/var/run/celery/%N.log - - # To stop the node, you need to specify the same pidfile. - $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid - - # 3 workers, with 3 processes each - $ celery multi start 3 -c 3 - celery worker -n celery1@myhost -c 3 - celery worker -n celery2@myhost -c 3 - celery worker -n celery3@myhost -c 3 - - # start 3 named workers - $ celery multi start image video data -c 3 - celery worker -n image@myhost -c 3 - celery worker -n video@myhost -c 3 - celery worker -n data@myhost -c 3 - - # specify custom hostname - $ celery multi start 2 --hostname=worker.example.com -c 3 - celery worker -n celery1@worker.example.com -c 3 - celery worker -n celery2@worker.example.com -c 3 - - # specify fully qualified nodenames - $ celery multi start foo@worker.example.com bar@worker.example.com -c 3 - - # Advanced example starting 10 workers in the background: - # * Three of the workers processes the images and video queue - # * Two of the workers processes the data queue with loglevel DEBUG - # * the rest processes the default' queue. - $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data - -Q default -L:4,5 DEBUG - - # You can show the commands necessary to start the workers with - # the 'show' command: - $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data - -Q default -L:4,5 DEBUG - - # Additional options are added to each celery worker' comamnd, - # but you can also modify the options for ranges of, or specific workers - - # 3 workers: Two with 3 processes, and one with 10 processes. 
- $ celery multi start 3 -c 3 -c:1 10 - celery worker -n celery1@myhost -c 10 - celery worker -n celery2@myhost -c 3 - celery worker -n celery3@myhost -c 3 - - # can also specify options for named workers - $ celery multi start image video data -c 3 -c:image 10 - celery worker -n image@myhost -c 10 - celery worker -n video@myhost -c 3 - celery worker -n data@myhost -c 3 - - # ranges and lists of workers in options is also allowed: - # (-c:1-3 can also be written as -c:1,2,3) - $ celery multi start 5 -c 3 -c:1-3 10 - celery worker -n celery1@myhost -c 10 - celery worker -n celery2@myhost -c 10 - celery worker -n celery3@myhost -c 10 - celery worker -n celery4@myhost -c 3 - celery worker -n celery5@myhost -c 3 - - # lists also works with named workers - $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 - celery worker -n foo@myhost -c 10 - celery worker -n bar@myhost -c 10 - celery worker -n baz@myhost -c 10 - celery worker -n xuzzy@myhost -c 3 - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import errno -import os -import shlex -import signal -import socket -import sys - -from collections import defaultdict, namedtuple -from subprocess import Popen -from time import sleep - -from kombu.utils import cached_property -from kombu.utils.compat import OrderedDict -from kombu.utils.encoding import from_utf8 - -from celery import VERSION_BANNER -from celery.five import items -from celery.platforms import Pidfile, IS_WINDOWS -from celery.utils import term, nodesplit -from celery.utils.text import pluralize - -__all__ = ['MultiTool'] - -SIGNAMES = set(sig for sig in dir(signal) - if sig.startswith('SIG') and '_' not in sig) -SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES) - -USAGE = """\ -usage: {prog_name} start [worker options] - {prog_name} stop [-SIG (default: -TERM)] - {prog_name} stopwait [-SIG (default: -TERM)] - {prog_name} restart [-SIG] [worker options] - {prog_name} kill - - {prog_name} show [worker options] - {prog_name} get hostname [-qv] [worker options] - {prog_name} names - {prog_name} expand template - {prog_name} help - -additional options (must appear after command name): - - * --nosplash: Don't display program info. - * --quiet: Don't show as much output. - * --verbose: Show more output. - * --no-color: Don't display colors. -""" - -multi_args_t = namedtuple( - 'multi_args_t', ('name', 'argv', 'expander', 'namespace'), -) - - -def main(): - sys.exit(MultiTool().execute_from_commandline(sys.argv)) - - -CELERY_EXE = 'celery' -if sys.version_info < (2, 7): - # pkg.__main__ first supported in Py2.7 - CELERY_EXE = 'celery.__main__' - - -def celery_exe(*args): - return ' '.join((CELERY_EXE, ) + args) - - -class MultiTool(object): - retcode = 0 # Final exit code. 
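    # (Illustrative: besides the `celery multi` entry point, the tool can
    #  be driven directly, mirroring main():
    #      from celery.bin.multi import MultiTool
    #      MultiTool().execute_from_commandline(
    #          ['celery multi', 'start', '2', '-c', '3'])  )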
- - def __init__(self, env=None, fh=None, quiet=False, verbose=False, - no_color=False, nosplash=False, stdout=None, stderr=None): - """fh is an old alias to stdout.""" - self.stdout = self.fh = stdout or fh or sys.stdout - self.stderr = stderr or sys.stderr - self.env = env - self.nosplash = nosplash - self.quiet = quiet - self.verbose = verbose - self.no_color = no_color - self.prog_name = 'celery multi' - self.commands = {'start': self.start, - 'show': self.show, - 'stop': self.stop, - 'stopwait': self.stopwait, - 'stop_verify': self.stopwait, # compat alias - 'restart': self.restart, - 'kill': self.kill, - 'names': self.names, - 'expand': self.expand, - 'get': self.get, - 'help': self.help} - - def execute_from_commandline(self, argv, cmd='celery worker'): - argv = list(argv) # don't modify callers argv. - - # Reserve the --nosplash|--quiet|-q/--verbose options. - if '--nosplash' in argv: - self.nosplash = argv.pop(argv.index('--nosplash')) - if '--quiet' in argv: - self.quiet = argv.pop(argv.index('--quiet')) - if '-q' in argv: - self.quiet = argv.pop(argv.index('-q')) - if '--verbose' in argv: - self.verbose = argv.pop(argv.index('--verbose')) - if '--no-color' in argv: - self.no_color = argv.pop(argv.index('--no-color')) - - self.prog_name = os.path.basename(argv.pop(0)) - if not argv or argv[0][0] == '-': - return self.error() - - try: - self.commands[argv[0]](argv[1:], cmd) - except KeyError: - self.error('Invalid command: {0}'.format(argv[0])) - - return self.retcode - - def say(self, m, newline=True, file=None): - print(m, file=file or self.stdout, end='\n' if newline else '') - - def carp(self, m, newline=True, file=None): - return self.say(m, newline, file or self.stderr) - - def names(self, argv, cmd): - p = NamespacedOptionParser(argv) - self.say('\n'.join( - n.name for n in multi_args(p, cmd)), - ) - - def get(self, argv, cmd): - wanted = argv[0] - p = NamespacedOptionParser(argv[1:]) - for node in multi_args(p, cmd): - if node.name == wanted: - self.say(' '.join(node.argv)) - return - - def show(self, argv, cmd): - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - self.say('\n'.join( - ' '.join([sys.executable] + n.argv) for n in multi_args(p, cmd)), - ) - - def start(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - retcodes = [] - self.note('> Starting nodes...') - for node in multi_args(p, cmd): - self.note('\t> {0}: '.format(node.name), newline=False) - retcode = self.waitexec(node.argv, path=p.options['--executable']) - self.note(retcode and self.FAILED or self.OK) - retcodes.append(retcode) - self.retcode = int(any(retcodes)) - - def with_detacher_default_options(self, p): - _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') - _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log') - p.options.setdefault( - '--cmd', - '-m {0}'.format(celery_exe('worker', '--detach')), - ) - _setdefaultopt(p.options, ['--executable'], sys.executable) - - def signal_node(self, nodename, pid, sig): - try: - os.kill(pid, sig) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - self.note('Could not signal {0} ({1}): No such process'.format( - nodename, pid)) - return False - return True - - def node_alive(self, pid): - try: - os.kill(pid, 0) - except OSError as exc: - if exc.errno == errno.ESRCH: - return False - raise - return True - - def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None, - callback=None): - if not nodes: - return - P = set(nodes) - - def on_down(node): - 
P.discard(node) - if callback: - callback(*node) - - self.note(self.colored.blue('> Stopping nodes...')) - for node in list(P): - if node in P: - nodename, _, pid = node - self.note('\t> {0}: {1} -> {2}'.format( - nodename, SIGMAP[sig][3:], pid)) - if not self.signal_node(nodename, pid, sig): - on_down(node) - - def note_waiting(): - left = len(P) - if left: - pids = ', '.join(str(pid) for _, _, pid in P) - self.note(self.colored.blue( - '> Waiting for {0} {1} -> {2}...'.format( - left, pluralize(left, 'node'), pids)), newline=False) - - if retry: - note_waiting() - its = 0 - while P: - for node in P: - its += 1 - self.note('.', newline=False) - nodename, _, pid = node - if not self.node_alive(pid): - self.note('\n\t> {0}: {1}'.format(nodename, self.OK)) - on_down(node) - note_waiting() - break - if P and not its % len(P): - sleep(float(retry)) - self.note('') - - def getpids(self, p, cmd, callback=None): - _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') - - nodes = [] - for node in multi_args(p, cmd): - try: - pidfile_template = _getopt( - p.namespaces[node.namespace], ['--pidfile', '-p'], - ) - except KeyError: - pidfile_template = _getopt(p.options, ['--pidfile', '-p']) - pid = None - pidfile = node.expander(pidfile_template) - try: - pid = Pidfile(pidfile).read_pid() - except ValueError: - pass - if pid: - nodes.append((node.name, tuple(node.argv), pid)) - else: - self.note('> {0.name}: {1}'.format(node, self.DOWN)) - if callback: - callback(node.name, node.argv, pid) - - return nodes - - def kill(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - for nodename, _, pid in self.getpids(p, cmd): - self.note('Killing node {0} ({1})'.format(nodename, pid)) - self.signal_node(nodename, pid, signal.SIGKILL) - - def stop(self, argv, cmd, retry=None, callback=None): - self.splash() - p = NamespacedOptionParser(argv) - return self._stop_nodes(p, cmd, retry=retry, callback=callback) - - def _stop_nodes(self, p, cmd, retry=None, callback=None): - restargs = p.args[len(p.values):] - self.shutdown_nodes(self.getpids(p, cmd, callback=callback), - sig=findsig(restargs), - retry=retry, - callback=callback) - - def restart(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - retvals = [] - - def on_node_shutdown(nodename, argv, pid): - self.note(self.colored.blue( - '> Restarting node {0}: '.format(nodename)), newline=False) - retval = self.waitexec(argv, path=p.options['--executable']) - self.note(retval and self.FAILED or self.OK) - retvals.append(retval) - - self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown) - self.retval = int(any(retvals)) - - def stopwait(self, argv, cmd): - self.splash() - p = NamespacedOptionParser(argv) - self.with_detacher_default_options(p) - return self._stop_nodes(p, cmd, retry=2) - stop_verify = stopwait # compat - - def expand(self, argv, cmd=None): - template = argv[0] - p = NamespacedOptionParser(argv[1:]) - for node in multi_args(p, cmd): - self.say(node.expander(template)) - - def help(self, argv, cmd=None): - self.say(__doc__) - - def usage(self): - self.splash() - self.say(USAGE.format(prog_name=self.prog_name)) - - def splash(self): - if not self.nosplash: - c = self.colored - self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER))) - - def waitexec(self, argv, path=sys.executable): - args = ' '.join([path] + list(argv)) - argstr = shlex.split(from_utf8(args), posix=not IS_WINDOWS) - pipe = Popen(argstr, env=self.env) - self.info(' {0}'.format(' '.join(argstr))) - 
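        # (Illustrative: a zero return here means the child detached into
        #  the background cleanly, which is what `start` reports as OK per
        #  node, e.g. for
        #      celery multi start 2 --pidfile=/var/run/celery/%N.pid )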
retcode = pipe.wait() - if retcode < 0: - self.note('* Child was terminated by signal {0}'.format(-retcode)) - return -retcode - elif retcode > 0: - self.note('* Child terminated with errorcode {0}'.format(retcode)) - return retcode - - def error(self, msg=None): - if msg: - self.carp(msg) - self.usage() - self.retcode = 1 - return 1 - - def info(self, msg, newline=True): - if self.verbose: - self.note(msg, newline=newline) - - def note(self, msg, newline=True): - if not self.quiet: - self.say(str(msg), newline=newline) - - @cached_property - def colored(self): - return term.colored(enabled=not self.no_color) - - @cached_property - def OK(self): - return str(self.colored.green('OK')) - - @cached_property - def FAILED(self): - return str(self.colored.red('FAILED')) - - @cached_property - def DOWN(self): - return str(self.colored.magenta('DOWN')) - - -def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''): - names = p.values - options = dict(p.options) - passthrough = p.passthrough - ranges = len(names) == 1 - if ranges: - try: - noderange = int(names[0]) - except ValueError: - pass - else: - names = [str(n) for n in range(1, noderange + 1)] - prefix = 'celery' - cmd = options.pop('--cmd', cmd) - append = options.pop('--append', append) - hostname = options.pop('--hostname', - options.pop('-n', socket.gethostname())) - prefix = options.pop('--prefix', prefix) or '' - suffix = options.pop('--suffix', suffix) or hostname - if suffix in ('""', "''"): - suffix = '' - - for ns_name, ns_opts in list(items(p.namespaces)): - if ',' in ns_name or (ranges and '-' in ns_name): - for subns in parse_ns_range(ns_name, ranges): - p.namespaces[subns].update(ns_opts) - p.namespaces.pop(ns_name) - - # Numbers in args always refers to the index in the list of names. - # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). 
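-    # For example, ``celery multi start 3 -c 3 -c:1 10`` merges the
-    # options namespaced under ``1`` into the first generated node
-    # (celery1@hostname), giving it concurrency 10 while the others get 3.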
- for ns_name, ns_opts in list(items(p.namespaces)): - if ns_name.isdigit(): - ns_index = int(ns_name) - 1 - if ns_index < 0: - raise KeyError('Indexes start at 1 got: %r' % (ns_name, )) - try: - p.namespaces[names[ns_index]].update(ns_opts) - except IndexError: - raise KeyError('No node at index %r' % (ns_name, )) - - for name in names: - this_suffix = suffix - if '@' in name: - this_name = options['-n'] = name - nodename, this_suffix = nodesplit(name) - name = nodename - else: - nodename = '%s%s' % (prefix, name) - this_name = options['-n'] = '%s@%s' % (nodename, this_suffix) - expand = abbreviations({'%h': this_name, - '%n': name, - '%N': nodename, - '%d': this_suffix}) - argv = ([expand(cmd)] + - [format_opt(opt, expand(value)) - for opt, value in items(p.optmerge(name, options))] + - [passthrough]) - if append: - argv.append(expand(append)) - yield multi_args_t(this_name, argv, expand, name) - - -class NamespacedOptionParser(object): - - def __init__(self, args): - self.args = args - self.options = OrderedDict() - self.values = [] - self.passthrough = '' - self.namespaces = defaultdict(lambda: OrderedDict()) - - self.parse() - - def parse(self): - rargs = list(self.args) - pos = 0 - while pos < len(rargs): - arg = rargs[pos] - if arg == '--': - self.passthrough = ' '.join(rargs[pos:]) - break - elif arg[0] == '-': - if arg[1] == '-': - self.process_long_opt(arg[2:]) - else: - value = None - if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-': - value = rargs[pos + 1] - pos += 1 - self.process_short_opt(arg[1:], value) - else: - self.values.append(arg) - pos += 1 - - def process_long_opt(self, arg, value=None): - if '=' in arg: - arg, value = arg.split('=', 1) - self.add_option(arg, value, short=False) - - def process_short_opt(self, arg, value=None): - self.add_option(arg, value, short=True) - - def optmerge(self, ns, defaults=None): - if defaults is None: - defaults = self.options - return OrderedDict(defaults, **self.namespaces[ns]) - - def add_option(self, name, value, short=False, ns=None): - prefix = short and '-' or '--' - dest = self.options - if ':' in name: - name, ns = name.split(':') - dest = self.namespaces[ns] - dest[prefix + name] = value - - -def quote(v): - return "\\'".join("'" + p + "'" for p in v.split("'")) - - -def format_opt(opt, value): - if not value: - return opt - if opt.startswith('--'): - return '{0}={1}'.format(opt, value) - return '{0} {1}'.format(opt, value) - - -def parse_ns_range(ns, ranges=False): - ret = [] - for space in ',' in ns and ns.split(',') or [ns]: - if ranges and '-' in space: - start, stop = space.split('-') - ret.extend( - str(n) for n in range(int(start), int(stop) + 1) - ) - else: - ret.append(space) - return ret - - -def abbreviations(mapping): - - def expand(S): - ret = S - if S is not None: - for short_opt, long_opt in items(mapping): - ret = ret.replace(short_opt, long_opt) - return ret - - return expand - - -def findsig(args, default=signal.SIGTERM): - for arg in reversed(args): - if len(arg) == 2 and arg[0] == '-': - try: - return int(arg[1]) - except ValueError: - pass - if arg[0] == '-': - maybe_sig = 'SIG' + arg[1:] - if maybe_sig in SIGNAMES: - return getattr(signal, maybe_sig) - return default - - -def _getopt(d, alt): - for opt in alt: - try: - return d[opt] - except KeyError: - pass - raise KeyError(alt[0]) - - -def _setdefaultopt(d, alt, value): - for opt in alt[1:]: - try: - return d[opt] - except KeyError: - pass - return d.setdefault(alt[0], value) - - -if __name__ == '__main__': # pragma: no cover - main() diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py b/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py deleted file mode 100644 index dc04075..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- -""" - -The :program:`celery worker` command (previously known as ``celeryd``) - -.. program:: celery worker - -.. seealso:: - - See :ref:`preload-options`. - -.. cmdoption:: -c, --concurrency - - Number of child processes processing the queue. The default - is the number of CPUs available on your system. - -.. cmdoption:: -P, --pool - - Pool implementation: - - prefork (default), eventlet, gevent, solo or threads. - -.. cmdoption:: -f, --logfile - - Path to log file. If no logfile is specified, `stderr` is used. - -.. cmdoption:: -l, --loglevel - - Logging level, choose between `DEBUG`, `INFO`, `WARNING`, - `ERROR`, `CRITICAL`, or `FATAL`. - -.. cmdoption:: -n, --hostname - - Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname), - %n (name) and %d, (domain). - -.. cmdoption:: -B, --beat - - Also run the `celery beat` periodic task scheduler. Please note that - there must only be one instance of this service. - -.. cmdoption:: -Q, --queues - - List of queues to enable for this worker, separated by comma. - By default all configured queues are enabled. - Example: `-Q video,image` - -.. cmdoption:: -I, --include - - Comma separated list of additional modules to import. - Example: -I foo.tasks,bar.tasks - -.. cmdoption:: -s, --schedule - - Path to the schedule database if running with the `-B` option. - Defaults to `celerybeat-schedule`. The extension ".db" may be - appended to the filename. - -.. cmdoption:: -O - - Apply optimization profile. Supported: default, fair - -.. cmdoption:: --scheduler - - Scheduler class to use. Default is celery.beat.PersistentScheduler - -.. cmdoption:: -S, --statedb - - Path to the state database. The extension '.db' may - be appended to the filename. Default: {default} - -.. cmdoption:: -E, --events - - Send events that can be captured by monitors like :program:`celery events`, - `celerymon`, and others. - -.. cmdoption:: --without-gossip - - Do not subscribe to other workers events. - -.. cmdoption:: --without-mingle - - Do not synchronize with other workers at startup. - -.. cmdoption:: --without-heartbeat - - Do not send event heartbeats. - -.. cmdoption:: --heartbeat-interval - - Interval in seconds at which to send worker heartbeat - -.. cmdoption:: --purge - - Purges all waiting tasks before the daemon is started. - **WARNING**: This is unrecoverable, and the tasks will be - deleted from the messaging server. - -.. cmdoption:: --time-limit - - Enables a hard time limit (in seconds int/float) for tasks. - -.. cmdoption:: --soft-time-limit - - Enables a soft time limit (in seconds int/float) for tasks. - -.. cmdoption:: --maxtasksperchild - - Maximum number of tasks a pool worker can execute before it's - terminated and replaced by a new worker. - -.. cmdoption:: --pidfile - - Optional file used to store the workers pid. - - The worker will not start if this file already exists - and the pid is still alive. - -.. cmdoption:: --autoscale - - Enable autoscaling by providing - max_concurrency, min_concurrency. Example:: - - --autoscale=10,3 - - (always keep 3 processes, but grow to 10 if necessary) - -.. cmdoption:: --autoreload - - Enable autoreloading. - -.. cmdoption:: --no-execv - - Don't do execv after multiprocessing child fork. 
- -""" -from __future__ import absolute_import, unicode_literals - -import sys - -from celery import concurrency -from celery.bin.base import Command, Option, daemon_options -from celery.bin.celeryd_detach import detached_celeryd -from celery.five import string_t -from celery.platforms import maybe_drop_privileges -from celery.utils import default_nodename -from celery.utils.log import LOG_LEVELS, mlevel - -__all__ = ['worker', 'main'] - -__MODULE_DOC__ = __doc__ - - -class worker(Command): - """Start worker instance. - - Examples:: - - celery worker --app=proj -l info - celery worker -A proj -l info -Q hipri,lopri - - celery worker -A proj --concurrency=4 - celery worker -A proj --concurrency=1000 -P eventlet - - celery worker --autoscale=10,0 - """ - doc = __MODULE_DOC__ # parse help from this too - namespace = 'celeryd' - enable_config_from_cmdline = True - supports_args = False - - def run_from_argv(self, prog_name, argv=None, command=None): - command = sys.argv[0] if command is None else command - argv = sys.argv[1:] if argv is None else argv - # parse options before detaching so errors can be handled. - options, args = self.prepare_args( - *self.parse_options(prog_name, argv, command)) - self.maybe_detach([command] + argv) - return self(*args, **options) - - def maybe_detach(self, argv, dopts=['-D', '--detach']): - if any(arg in argv for arg in dopts): - argv = [v for v in argv if v not in dopts] - # will never return - detached_celeryd(self.app).execute_from_commandline(argv) - raise SystemExit(0) - - def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None, - loglevel=None, logfile=None, pidfile=None, state_db=None, - **kwargs): - maybe_drop_privileges(uid=uid, gid=gid) - # Pools like eventlet/gevent needs to patch libs as early - # as possible. - pool_cls = (concurrency.get_implementation(pool_cls) or - self.app.conf.CELERYD_POOL) - if self.app.IS_WINDOWS and kwargs.get('beat'): - self.die('-B option does not work on Windows. ' - 'Please run celery beat as a separate service.') - hostname = self.host_format(default_nodename(hostname)) - if loglevel: - try: - loglevel = mlevel(loglevel) - except KeyError: # pragma: no cover - self.die('Unknown level {0!r}. Please use one of {1}.'.format( - loglevel, '|'.join( - l for l in LOG_LEVELS if isinstance(l, string_t)))) - - return self.app.Worker( - hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, - logfile=logfile, # node format handled by celery.app.log.setup - pidfile=self.node_format(pidfile, hostname), - state_db=self.node_format(state_db, hostname), **kwargs - ).start() - - def with_pool_option(self, argv): - # this command support custom pools - # that may have to be loaded as early as possible. 
- return (['-P'], ['--pool']) - - def get_options(self): - conf = self.app.conf - return ( - Option('-c', '--concurrency', - default=conf.CELERYD_CONCURRENCY, type='int'), - Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'), - Option('--purge', '--discard', default=False, action='store_true'), - Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL), - Option('-n', '--hostname'), - Option('-B', '--beat', action='store_true'), - Option('-s', '--schedule', dest='schedule_filename', - default=conf.CELERYBEAT_SCHEDULE_FILENAME), - Option('--scheduler', dest='scheduler_cls'), - Option('-S', '--statedb', - default=conf.CELERYD_STATE_DB, dest='state_db'), - Option('-E', '--events', default=conf.CELERY_SEND_EVENTS, - action='store_true', dest='send_events'), - Option('--time-limit', type='float', dest='task_time_limit', - default=conf.CELERYD_TASK_TIME_LIMIT), - Option('--soft-time-limit', dest='task_soft_time_limit', - default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'), - Option('--maxtasksperchild', dest='max_tasks_per_child', - default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'), - Option('--queues', '-Q', default=[]), - Option('--exclude-queues', '-X', default=[]), - Option('--include', '-I', default=[]), - Option('--autoscale'), - Option('--autoreload', action='store_true'), - Option('--no-execv', action='store_true', default=False), - Option('--without-gossip', action='store_true', default=False), - Option('--without-mingle', action='store_true', default=False), - Option('--without-heartbeat', action='store_true', default=False), - Option('--heartbeat-interval', type='int'), - Option('-O', dest='optimization'), - Option('-D', '--detach', action='store_true'), - ) + daemon_options() + tuple(self.app.user_options['worker']) - - -def main(app=None): - # Fix for setuptools generated scripts, so that it will - # work with multiprocessing fork emulation. - # (see multiprocessing.forking.get_preparation_data()) - if __name__ != '__main__': # pragma: no cover - sys.modules['__main__'] = sys.modules[__name__] - from billiard import freeze_support - freeze_support() - worker(app=app).execute_from_commandline() - - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py deleted file mode 100644 index 4471a4c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py +++ /dev/null @@ -1,422 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.bootsteps - ~~~~~~~~~~~~~~~~ - - A directed acyclic graph of reusable components. 
- -""" -from __future__ import absolute_import, unicode_literals - -from collections import deque -from threading import Event - -from kombu.common import ignore_errors -from kombu.utils import symbol_by_name - -from .datastructures import DependencyGraph, GraphFormatter -from .five import values, with_metaclass -from .utils.imports import instantiate, qualname -from .utils.log import get_logger - -try: - from greenlet import GreenletExit - IGNORE_ERRORS = (GreenletExit, ) -except ImportError: # pragma: no cover - IGNORE_ERRORS = () - -__all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep'] - -#: States -RUN = 0x1 -CLOSE = 0x2 -TERMINATE = 0x3 - -logger = get_logger(__name__) -debug = logger.debug - - -def _pre(ns, fmt): - return '| {0}: {1}'.format(ns.alias, fmt) - - -def _label(s): - return s.name.rsplit('.', 1)[-1] - - -class StepFormatter(GraphFormatter): - """Graph formatter for :class:`Blueprint`.""" - - blueprint_prefix = '⧉' - conditional_prefix = '∘' - blueprint_scheme = { - 'shape': 'parallelogram', - 'color': 'slategray4', - 'fillcolor': 'slategray3', - } - - def label(self, step): - return step and '{0}{1}'.format( - self._get_prefix(step), - (step.label or _label(step)).encode('utf-8', 'ignore'), - ) - - def _get_prefix(self, step): - if step.last: - return self.blueprint_prefix - if step.conditional: - return self.conditional_prefix - return '' - - def node(self, obj, **attrs): - scheme = self.blueprint_scheme if obj.last else self.node_scheme - return self.draw_node(obj, scheme, attrs) - - def edge(self, a, b, **attrs): - if a.last: - attrs.update(arrowhead='none', color='darkseagreen3') - return self.draw_edge(a, b, self.edge_scheme, attrs) - - -class Blueprint(object): - """Blueprint containing bootsteps that can be applied to objects. - - :keyword steps: List of steps. - :keyword name: Set explicit name for this blueprint. - :keyword app: Set the Celery app for this blueprint. - :keyword on_start: Optional callback applied after blueprint start. - :keyword on_close: Optional callback applied before blueprint close. - :keyword on_stopped: Optional callback applied after blueprint stopped. 
- - """ - GraphFormatter = StepFormatter - - name = None - state = None - started = 0 - default_steps = set() - state_to_name = { - 0: 'initializing', - RUN: 'running', - CLOSE: 'closing', - TERMINATE: 'terminating', - } - - def __init__(self, steps=None, name=None, app=None, - on_start=None, on_close=None, on_stopped=None): - self.app = app - self.name = name or self.name or qualname(type(self)) - self.types = set(steps or []) | set(self.default_steps) - self.on_start = on_start - self.on_close = on_close - self.on_stopped = on_stopped - self.shutdown_complete = Event() - self.steps = {} - - def start(self, parent): - self.state = RUN - if self.on_start: - self.on_start() - for i, step in enumerate(s for s in parent.steps if s is not None): - self._debug('Starting %s', step.alias) - self.started = i + 1 - step.start(parent) - debug('^-- substep ok') - - def human_state(self): - return self.state_to_name[self.state or 0] - - def info(self, parent): - info = {} - for step in parent.steps: - info.update(step.info(parent) or {}) - return info - - def close(self, parent): - if self.on_close: - self.on_close() - self.send_all(parent, 'close', 'closing', reverse=False) - - def restart(self, parent, method='stop', - description='restarting', propagate=False): - self.send_all(parent, method, description, propagate=propagate) - - def send_all(self, parent, method, - description=None, reverse=True, propagate=True, args=()): - description = description or method.replace('_', ' ') - steps = reversed(parent.steps) if reverse else parent.steps - for step in steps: - if step: - fun = getattr(step, method, None) - if fun is not None: - self._debug('%s %s...', - description.capitalize(), step.alias) - try: - fun(parent, *args) - except Exception as exc: - if propagate: - raise - logger.error( - 'Error on %s %s: %r', - description, step.alias, exc, exc_info=1, - ) - - def stop(self, parent, close=True, terminate=False): - what = 'terminating' if terminate else 'stopping' - if self.state in (CLOSE, TERMINATE): - return - - if self.state != RUN or self.started != len(parent.steps): - # Not fully started, can safely exit. - self.state = TERMINATE - self.shutdown_complete.set() - return - self.close(parent) - self.state = CLOSE - - self.restart( - parent, 'terminate' if terminate else 'stop', - description=what, propagate=False, - ) - - if self.on_stopped: - self.on_stopped() - self.state = TERMINATE - self.shutdown_complete.set() - - def join(self, timeout=None): - try: - # Will only get here if running green, - # makes sure all greenthreads have exited. - self.shutdown_complete.wait(timeout=timeout) - except IGNORE_ERRORS: - pass - - def apply(self, parent, **kwargs): - """Apply the steps in this blueprint to an object. - - This will apply the ``__init__`` and ``include`` methods - of each step, with the object as argument:: - - step = Step(obj) - ... - step.include(obj) - - For :class:`StartStopStep` the services created - will also be added to the objects ``steps`` attribute. 
- - """ - self._debug('Preparing bootsteps.') - order = self.order = [] - steps = self.steps = self.claim_steps() - - self._debug('Building graph...') - for S in self._finalize_steps(steps): - step = S(parent, **kwargs) - steps[step.name] = step - order.append(step) - self._debug('New boot order: {%s}', - ', '.join(s.alias for s in self.order)) - for step in order: - step.include(parent) - return self - - def connect_with(self, other): - self.graph.adjacent.update(other.graph.adjacent) - self.graph.add_edge(type(other.order[0]), type(self.order[-1])) - - def __getitem__(self, name): - return self.steps[name] - - def _find_last(self): - return next((C for C in values(self.steps) if C.last), None) - - def _firstpass(self, steps): - for step in values(steps): - step.requires = [symbol_by_name(dep) for dep in step.requires] - stream = deque(step.requires for step in values(steps)) - while stream: - for node in stream.popleft(): - node = symbol_by_name(node) - if node.name not in self.steps: - steps[node.name] = node - stream.append(node.requires) - - def _finalize_steps(self, steps): - last = self._find_last() - self._firstpass(steps) - it = ((C, C.requires) for C in values(steps)) - G = self.graph = DependencyGraph( - it, formatter=self.GraphFormatter(root=last), - ) - if last: - for obj in G: - if obj != last: - G.add_edge(last, obj) - try: - return G.topsort() - except KeyError as exc: - raise KeyError('unknown bootstep: %s' % exc) - - def claim_steps(self): - return dict(self.load_step(step) for step in self._all_steps()) - - def _all_steps(self): - return self.types | self.app.steps[self.name.lower()] - - def load_step(self, step): - step = symbol_by_name(step) - return step.name, step - - def _debug(self, msg, *args): - return debug(_pre(self, msg), *args) - - @property - def alias(self): - return _label(self) - - -class StepType(type): - """Metaclass for steps.""" - - def __new__(cls, name, bases, attrs): - module = attrs.get('__module__') - qname = '{0}.{1}'.format(module, name) if module else name - attrs.update( - __qualname__=qname, - name=attrs.get('name') or qname, - ) - return super(StepType, cls).__new__(cls, name, bases, attrs) - - def __str__(self): - return self.name - - def __repr__(self): - return 'step:{0.name}{{{0.requires!r}}}'.format(self) - - -@with_metaclass(StepType) -class Step(object): - """A Bootstep. - - The :meth:`__init__` method is called when the step - is bound to a parent object, and can as such be used - to initialize attributes in the parent object at - parent instantiation-time. - - """ - - #: Optional step name, will use qualname if not specified. - name = None - - #: Optional short name used for graph outputs and in logs. - label = None - - #: Set this to true if the step is enabled based on some condition. - conditional = False - - #: List of other steps that that must be started before this step. - #: Note that all dependencies must be in the same blueprint. - requires = () - - #: This flag is reserved for the workers Consumer, - #: since it is required to always be started last. - #: There can only be one object marked last - #: in every blueprint. - last = False - - #: This provides the default for :meth:`include_if`. 
- enabled = True - - def __init__(self, parent, **kwargs): - pass - - def include_if(self, parent): - """An optional predicate that decides whether this - step should be created.""" - return self.enabled - - def instantiate(self, name, *args, **kwargs): - return instantiate(name, *args, **kwargs) - - def _should_include(self, parent): - if self.include_if(parent): - return True, self.create(parent) - return False, None - - def include(self, parent): - return self._should_include(parent)[0] - - def create(self, parent): - """Create the step.""" - pass - - def __repr__(self): - return ''.format(self) - - @property - def alias(self): - return self.label or _label(self) - - def info(self, obj): - pass - - -class StartStopStep(Step): - - #: Optional obj created by the :meth:`create` method. - #: This is used by :class:`StartStopStep` to keep the - #: original service object. - obj = None - - def start(self, parent): - if self.obj: - return self.obj.start() - - def stop(self, parent): - if self.obj: - return self.obj.stop() - - def close(self, parent): - pass - - def terminate(self, parent): - if self.obj: - return getattr(self.obj, 'terminate', self.obj.stop)() - - def include(self, parent): - inc, ret = self._should_include(parent) - if inc: - self.obj = ret - parent.steps.append(self) - return inc - - -class ConsumerStep(StartStopStep): - requires = ('celery.worker.consumer:Connection', ) - consumers = None - - def get_consumers(self, channel): - raise NotImplementedError('missing get_consumers') - - def start(self, c): - channel = c.connection.channel() - self.consumers = self.get_consumers(channel) - for consumer in self.consumers or []: - consumer.consume() - - def stop(self, c): - self._close(c, True) - - def shutdown(self, c): - self._close(c, False) - - def _close(self, c, cancel_consumers=True): - channels = set() - for consumer in self.consumers or []: - if cancel_consumers: - ignore_errors(c.connection, consumer.cancel) - if consumer.channel: - channels.add(consumer.channel) - for channel in channels: - ignore_errors(c.connection, channel.close) diff --git a/thesisenv/lib/python3.6/site-packages/celery/canvas.py b/thesisenv/lib/python3.6/site-packages/celery/canvas.py deleted file mode 100644 index 4149e39..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/canvas.py +++ /dev/null @@ -1,698 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.canvas - ~~~~~~~~~~~~~ - - Composing task workflows. - - Documentation for some of these types are in :mod:`celery`. - You should import these from :mod:`celery` and not this module. - - -""" -from __future__ import absolute_import - -from collections import MutableSequence -from copy import deepcopy -from functools import partial as _partial, reduce -from operator import itemgetter -from itertools import chain as _chain - -from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid - -from celery._state import current_app -from celery.utils.functional import ( - maybe_list, is_list, regen, - chunks as _chunks, -) -from celery.utils.text import truncate - -__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks', - 'group', 'chord', 'signature', 'maybe_signature'] - - -class _getitem_property(object): - """Attribute -> dict key descriptor. - - The target object must support ``__getitem__``, - and optionally ``__setitem__``. - - Example: - - >>> from collections import defaultdict - - >>> class Me(dict): - ... deep = defaultdict(dict) - ... - ... foo = _getitem_property('foo') - ... 
deep_thing = _getitem_property('deep.thing') - - - >>> me = Me() - >>> me.foo - None - - >>> me.foo = 10 - >>> me.foo - 10 - >>> me['foo'] - 10 - - >>> me.deep_thing = 42 - >>> me.deep_thing - 42 - >>> me.deep - defaultdict(, {'thing': 42}) - - """ - - def __init__(self, keypath): - path, _, self.key = keypath.rpartition('.') - self.path = path.split('.') if path else None - - def _path(self, obj): - return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path - else obj) - - def __get__(self, obj, type=None): - if obj is None: - return type - return self._path(obj).get(self.key) - - def __set__(self, obj, value): - self._path(obj)[self.key] = value - - -def maybe_unroll_group(g): - """Unroll group with only one member.""" - # Issue #1656 - try: - size = len(g.tasks) - except TypeError: - try: - size = g.tasks.__length_hint__() - except (AttributeError, TypeError): - pass - else: - return list(g.tasks)[0] if size == 1 else g - else: - return g.tasks[0] if size == 1 else g - - -def _upgrade(fields, sig): - """Used by custom signatures in .from_dict, to keep common fields.""" - sig.update(chord_size=fields.get('chord_size')) - return sig - - -class Signature(dict): - """Class that wraps the arguments and execution options - for a single task invocation. - - Used as the parts in a :class:`group` and other constructs, - or to pass tasks around as callbacks while being compatible - with serializers with a strict type subset. - - :param task: Either a task class/instance, or the name of a task. - :keyword args: Positional arguments to apply. - :keyword kwargs: Keyword arguments to apply. - :keyword options: Additional options to :meth:`Task.apply_async`. - - Note that if the first argument is a :class:`dict`, the other - arguments will be ignored and the values in the dict will be used - instead. - - >>> s = signature('tasks.add', args=(2, 2)) - >>> signature(s) - {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} - - """ - TYPES = {} - _app = _type = None - - @classmethod - def register_type(cls, subclass, name=None): - cls.TYPES[name or subclass.__name__] = subclass - return subclass - - @classmethod - def from_dict(self, d, app=None): - typ = d.get('subtask_type') - if typ: - return self.TYPES[typ].from_dict(kwdict(d), app=app) - return Signature(d, app=app) - - def __init__(self, task=None, args=None, kwargs=None, options=None, - type=None, subtask_type=None, immutable=False, - app=None, **ex): - self._app = app - init = dict.__init__ - - if isinstance(task, dict): - return init(self, task) # works like dict(d) - - # Also supports using task class/instance instead of string name. - try: - task_name = task.name - except AttributeError: - task_name = task - else: - self._type = task - - init(self, - task=task_name, args=tuple(args or ()), - kwargs=kwargs or {}, - options=dict(options or {}, **ex), - subtask_type=subtask_type, - immutable=immutable, - chord_size=None) - - def __call__(self, *partial_args, **partial_kwargs): - args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) - return self.type(*args, **kwargs) - - def delay(self, *partial_args, **partial_kwargs): - return self.apply_async(partial_args, partial_kwargs) - - def apply(self, args=(), kwargs={}, **options): - """Apply this task locally.""" - # For callbacks: extra args are prepended to the stored args. 
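-        # e.g. with ``add.s(2)`` used as a callback, a parent result of 8
-        # runs ``add(8, 2)``.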
- args, kwargs, options = self._merge(args, kwargs, options) - return self.type.apply(args, kwargs, **options) - - def _merge(self, args=(), kwargs={}, options={}): - if self.immutable: - return (self.args, self.kwargs, - dict(self.options, **options) if options else self.options) - return (tuple(args) + tuple(self.args) if args else self.args, - dict(self.kwargs, **kwargs) if kwargs else self.kwargs, - dict(self.options, **options) if options else self.options) - - def clone(self, args=(), kwargs={}, app=None, **opts): - # need to deepcopy options so origins links etc. is not modified. - if args or kwargs or opts: - args, kwargs, opts = self._merge(args, kwargs, opts) - else: - args, kwargs, opts = self.args, self.kwargs, self.options - s = Signature.from_dict({'task': self.task, 'args': tuple(args), - 'kwargs': kwargs, 'options': deepcopy(opts), - 'subtask_type': self.subtask_type, - 'chord_size': self.chord_size, - 'immutable': self.immutable}, - app=app or self._app) - s._type = self._type - return s - partial = clone - - def freeze(self, _id=None, group_id=None, chord=None): - opts = self.options - try: - tid = opts['task_id'] - except KeyError: - tid = opts['task_id'] = _id or uuid() - if 'reply_to' not in opts: - opts['reply_to'] = self.app.oid - if group_id: - opts['group_id'] = group_id - if chord: - opts['chord'] = chord - return self.app.AsyncResult(tid) - _freeze = freeze - - def replace(self, args=None, kwargs=None, options=None): - s = self.clone() - if args is not None: - s.args = args - if kwargs is not None: - s.kwargs = kwargs - if options is not None: - s.options = options - return s - - def set(self, immutable=None, **options): - if immutable is not None: - self.set_immutable(immutable) - self.options.update(options) - return self - - def set_immutable(self, immutable): - self.immutable = immutable - - def apply_async(self, args=(), kwargs={}, **options): - try: - _apply = self._apply_async - except IndexError: # no tasks for chain, etc to find type - return - # For callbacks: extra args are prepended to the stored args. 
- if args or kwargs or options: - args, kwargs, options = self._merge(args, kwargs, options) - else: - args, kwargs, options = self.args, self.kwargs, self.options - return _apply(args, kwargs, **options) - - def append_to_list_option(self, key, value): - items = self.options.setdefault(key, []) - if not isinstance(items, MutableSequence): - items = self.options[key] = [items] - if value not in items: - items.append(value) - return value - - def link(self, callback): - return self.append_to_list_option('link', callback) - - def link_error(self, errback): - return self.append_to_list_option('link_error', errback) - - def flatten_links(self): - return list(_chain.from_iterable(_chain( - [[self]], - (link.flatten_links() - for link in maybe_list(self.options.get('link')) or []) - ))) - - def __or__(self, other): - if isinstance(other, group): - other = maybe_unroll_group(other) - if not isinstance(self, chain) and isinstance(other, chain): - return chain((self, ) + other.tasks, app=self._app) - elif isinstance(other, chain): - return chain(*self.tasks + other.tasks, app=self._app) - elif isinstance(other, Signature): - if isinstance(self, chain): - return chain(*self.tasks + (other, ), app=self._app) - return chain(self, other, app=self._app) - return NotImplemented - - def __deepcopy__(self, memo): - memo[id(self)] = self - return dict(self) - - def __invert__(self): - return self.apply_async().get() - - def __reduce__(self): - # for serialization, the task type is lazily loaded, - # and not stored in the dict itself. - return subtask, (dict(self), ) - - def reprcall(self, *args, **kwargs): - args, kwargs, _ = self._merge(args, kwargs, {}) - return reprcall(self['task'], args, kwargs) - - def election(self): - type = self.type - app = type.app - tid = self.options.get('task_id') or uuid() - - with app.producer_or_acquire(None) as P: - props = type.backend.on_task_call(P, tid) - app.control.election(tid, 'task', self.clone(task_id=tid, **props), - connection=P.connection) - return type.AsyncResult(tid) - - def __repr__(self): - return self.reprcall() - - @cached_property - def type(self): - return self._type or self.app.tasks[self['task']] - - @cached_property - def app(self): - return self._app or current_app - - @cached_property - def AsyncResult(self): - try: - return self.type.AsyncResult - except KeyError: # task not registered - return self.app.AsyncResult - - @cached_property - def _apply_async(self): - try: - return self.type.apply_async - except KeyError: - return _partial(self.app.send_task, self['task']) - id = _getitem_property('options.task_id') - task = _getitem_property('task') - args = _getitem_property('args') - kwargs = _getitem_property('kwargs') - options = _getitem_property('options') - subtask_type = _getitem_property('subtask_type') - chord_size = _getitem_property('chord_size') - immutable = _getitem_property('immutable') - - -@Signature.register_type -class chain(Signature): - - def __init__(self, *tasks, **options): - tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0]) - else tasks) - Signature.__init__( - self, 'celery.chain', (), {'tasks': tasks}, **options - ) - self.tasks = tasks - self.subtask_type = 'chain' - - def __call__(self, *args, **kwargs): - if self.tasks: - return self.apply_async(args, kwargs) - - @classmethod - def from_dict(self, d, app=None): - tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] - if d['args'] and tasks: - # partial args passed on to first task in chain (Issue #1057). 
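-            # e.g. ``chain(add.s(), add.s(8))(2, 2)`` runs add(2, 2)
-            # first, then feeds its result into add.s(8): add(4, 8).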
- tasks[0]['args'] = tasks[0]._merge(d['args'])[0] - return _upgrade(d, chain(*tasks, app=app, **d['options'])) - - @property - def type(self): - try: - return self._type or self.tasks[0].type.app.tasks['celery.chain'] - except KeyError: - return self.app.tasks['celery.chain'] - - def __repr__(self): - return ' | '.join(repr(t) for t in self.tasks) - - -class _basemap(Signature): - _task_name = None - _unpack_args = itemgetter('task', 'it') - - def __init__(self, task, it, **options): - Signature.__init__( - self, self._task_name, (), - {'task': task, 'it': regen(it)}, immutable=True, **options - ) - - def apply_async(self, args=(), kwargs={}, **opts): - # need to evaluate generators - task, it = self._unpack_args(self.kwargs) - return self.type.apply_async( - (), {'task': task, 'it': list(it)}, **opts - ) - - @classmethod - def from_dict(cls, d, app=None): - return _upgrade( - d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']), - ) - - -@Signature.register_type -class xmap(_basemap): - _task_name = 'celery.map' - - def __repr__(self): - task, it = self._unpack_args(self.kwargs) - return '[{0}(x) for x in {1}]'.format(task.task, - truncate(repr(it), 100)) - - -@Signature.register_type -class xstarmap(_basemap): - _task_name = 'celery.starmap' - - def __repr__(self): - task, it = self._unpack_args(self.kwargs) - return '[{0}(*x) for x in {1}]'.format(task.task, - truncate(repr(it), 100)) - - -@Signature.register_type -class chunks(Signature): - _unpack_args = itemgetter('task', 'it', 'n') - - def __init__(self, task, it, n, **options): - Signature.__init__( - self, 'celery.chunks', (), - {'task': task, 'it': regen(it), 'n': n}, - immutable=True, **options - ) - - @classmethod - def from_dict(self, d, app=None): - return _upgrade( - d, chunks(*self._unpack_args( - d['kwargs']), app=app, **d['options']), - ) - - def apply_async(self, args=(), kwargs={}, **opts): - return self.group().apply_async(args, kwargs, **opts) - - def __call__(self, **options): - return self.group()(**options) - - def group(self): - # need to evaluate generators - task, it, n = self._unpack_args(self.kwargs) - return group((xstarmap(task, part, app=self._app) - for part in _chunks(iter(it), n)), - app=self._app) - - @classmethod - def apply_chunks(cls, task, it, n, app=None): - return cls(task, it, n, app=app)() - - -def _maybe_group(tasks): - if isinstance(tasks, group): - tasks = list(tasks.tasks) - elif isinstance(tasks, Signature): - tasks = [tasks] - else: - tasks = regen(tasks) - return tasks - - -def _maybe_clone(tasks, app): - return [s.clone() if isinstance(s, Signature) else signature(s, app=app) - for s in tasks] - - -@Signature.register_type -class group(Signature): - - def __init__(self, *tasks, **options): - if len(tasks) == 1: - tasks = _maybe_group(tasks[0]) - Signature.__init__( - self, 'celery.group', (), {'tasks': tasks}, **options - ) - self.tasks, self.subtask_type = tasks, 'group' - - @classmethod - def from_dict(self, d, app=None): - tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] - if d['args'] and tasks: - # partial args passed on to all tasks in the group (Issue #1057). 
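-            # e.g. ``group(add.s(4), add.s(8))(2)`` runs add(2, 4)
-            # and add(2, 8).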
- for task in tasks: - task['args'] = task._merge(d['args'])[0] - return _upgrade(d, group(tasks, app=app, **kwdict(d['options']))) - - def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options): - tasks = _maybe_clone(self.tasks, app=self._app) - if not tasks: - return self.freeze() - type = self.type - return type(*type.prepare(dict(self.options, **options), tasks, args), - add_to_parent=add_to_parent) - - def set_immutable(self, immutable): - for task in self.tasks: - task.set_immutable(immutable) - - def link(self, sig): - # Simply link to first task - sig = sig.clone().set(immutable=True) - return self.tasks[0].link(sig) - - def link_error(self, sig): - sig = sig.clone().set(immutable=True) - return self.tasks[0].link_error(sig) - - def apply(self, *args, **kwargs): - if not self.tasks: - return self.freeze() # empty group returns GroupResult - return Signature.apply(self, *args, **kwargs) - - def __call__(self, *partial_args, **options): - return self.apply_async(partial_args, **options) - - def freeze(self, _id=None, group_id=None, chord=None): - opts = self.options - try: - gid = opts['task_id'] - except KeyError: - gid = opts['task_id'] = uuid() - if group_id: - opts['group_id'] = group_id - if chord: - opts['chord'] = group_id - new_tasks, results = [], [] - for task in self.tasks: - task = maybe_signature(task, app=self._app).clone() - results.append(task.freeze(group_id=group_id, chord=chord)) - new_tasks.append(task) - self.tasks = self.kwargs['tasks'] = new_tasks - return self.app.GroupResult(gid, results) - _freeze = freeze - - def skew(self, start=1.0, stop=None, step=1.0): - it = fxrange(start, stop, step, repeatlast=True) - for task in self.tasks: - task.set(countdown=next(it)) - return self - - def __iter__(self): - return iter(self.tasks) - - def __repr__(self): - return repr(self.tasks) - - @property - def app(self): - return self._app or (self.tasks[0].app if self.tasks else current_app) - - @property - def type(self): - if self._type: - return self._type - # taking the app from the first task in the list, there may be a - # better solution for this, e.g. to consolidate tasks with the same - # app and apply them in batches. - return self.app.tasks[self['task']] - - -@Signature.register_type -class chord(Signature): - - def __init__(self, header, body=None, task='celery.chord', - args=(), kwargs={}, **options): - Signature.__init__( - self, task, args, - dict(kwargs, header=_maybe_group(header), - body=maybe_signature(body, app=self._app)), **options - ) - self.subtask_type = 'chord' - - def apply(self, args=(), kwargs={}, **options): - # For callbacks: extra args are prepended to the stored args. - args, kwargs, options = self._merge(args, kwargs, options) - return self.type.apply(args, kwargs, **options) - - def freeze(self, _id=None, group_id=None, chord=None): - return self.body.freeze(_id, group_id=group_id, chord=chord) - - @classmethod - def from_dict(self, d, app=None): - args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs'])) - return _upgrade(d, self(*args, app=app, **kwdict(d))) - - @staticmethod - def _unpack_args(header=None, body=None, **kwargs): - # Python signatures are better at extracting keys from dicts - # than manually popping things off. 
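-        # e.g. _unpack_args(header=hdr, body=cb, countdown=10)
-        # returns ((hdr, cb), {'countdown': 10}).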
- return (header, body), kwargs - - @property - def app(self): - # we will be able to fix this mess in 3.2 when we no longer - # require an actual task implementation for chord/group - if self._app: - return self._app - app = None if self.body is None else self.body.app - if app is None: - try: - app = self.tasks[0].app - except IndexError: - app = None - return app if app is not None else current_app - - @property - def type(self): - if self._type: - return self._type - return self.app.tasks['celery.chord'] - - def delay(self, *partial_args, **partial_kwargs): - # There's no partial_kwargs for chord. - return self.apply_async(partial_args) - - def apply_async(self, args=(), kwargs={}, task_id=None, - producer=None, publisher=None, connection=None, - router=None, result_cls=None, **options): - args = (tuple(args) + tuple(self.args) - if args and not self.immutable else self.args) - body = kwargs.get('body') or self.kwargs['body'] - kwargs = dict(self.kwargs, **kwargs) - body = body.clone(**options) - - _chord = self.type - if _chord.app.conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, task_id=task_id, **options) - res = body.freeze(task_id) - parent = _chord(self.tasks, body, args, **options) - res.parent = parent - return res - - def __call__(self, body=None, **options): - return self.apply_async( - (), {'body': body} if body else {}, **options) - - def clone(self, *args, **kwargs): - s = Signature.clone(self, *args, **kwargs) - # need to make copy of body - try: - s.kwargs['body'] = s.kwargs['body'].clone() - except (AttributeError, KeyError): - pass - return s - - def link(self, callback): - self.body.link(callback) - return callback - - def link_error(self, errback): - self.body.link_error(errback) - return errback - - def set_immutable(self, immutable): - # changes mutability of header only, not callback. - for task in self.tasks: - task.set_immutable(immutable) - - def __repr__(self): - if self.body: - return self.body.reprcall(self.tasks) - return ''.format(self) - - tasks = _getitem_property('kwargs.header') - body = _getitem_property('kwargs.body') - - -def signature(varies, args=(), kwargs={}, options={}, app=None, **kw): - if isinstance(varies, dict): - if isinstance(varies, Signature): - return varies.clone(app=app) - return Signature.from_dict(varies, app=app) - return Signature(varies, args, kwargs, options, app=app, **kw) -subtask = signature # XXX compat - - -def maybe_signature(d, app=None): - if d is not None: - if isinstance(d, dict): - if not isinstance(d, Signature): - return signature(d, app=app) - elif isinstance(d, list): - return [maybe_signature(s, app=app) for s in d] - if app is not None: - d._app = app - return d -maybe_subtask = maybe_signature # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py deleted file mode 100644 index c58fdbc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency - ~~~~~~~~~~~~~~~~~~ - - Pool implementation abstract factory, and alias definitions. - -""" -from __future__ import absolute_import - -# Import from kombu directly as it's used -# early in the import stage, where celery.utils loads -# too much (e.g. 
for eventlet patching) -from kombu.utils import symbol_by_name - -__all__ = ['get_implementation'] - -ALIASES = { - 'prefork': 'celery.concurrency.prefork:TaskPool', - 'eventlet': 'celery.concurrency.eventlet:TaskPool', - 'gevent': 'celery.concurrency.gevent:TaskPool', - 'threads': 'celery.concurrency.threads:TaskPool', - 'solo': 'celery.concurrency.solo:TaskPool', - 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias -} - - -def get_implementation(cls): - return symbol_by_name(cls, ALIASES) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py deleted file mode 100644 index bc29d9c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py +++ /dev/null @@ -1,1270 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.asynpool - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - .. note:: - - This module will be moved soon, so don't use it directly. - - Non-blocking version of :class:`multiprocessing.Pool`. - - This code deals with three major challenges: - - 1) Starting up child processes and keeping them running. - 2) Sending jobs to the processes and receiving results back. - 3) Safely shutting down this system. - -""" -from __future__ import absolute_import - -import errno -import gc -import os -import select -import socket -import struct -import sys -import time - -from collections import deque, namedtuple -from io import BytesIO -from pickle import HIGHEST_PROTOCOL -from time import sleep -from weakref import WeakValueDictionary, ref - -from amqp.utils import promise -from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined -from billiard import pool as _pool -from billiard.compat import buf_t, setblocking, isblocking -from billiard.einfo import ExceptionInfo -from billiard.queues import _SimpleQueue -from kombu.async import READ, WRITE, ERR -from kombu.serialization import pickle as _pickle -from kombu.utils import fxrange -from kombu.utils.compat import get_errno -from kombu.utils.eventio import SELECT_BAD_FD -from celery.five import Counter, items, string_t, text_t, values -from celery.utils.log import get_logger -from celery.utils.text import truncate -from celery.worker import state as worker_state - -try: - from _billiard import read as __read__ - from struct import unpack_from as _unpack_from - memoryview = memoryview - readcanbuf = True - - if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6): - - def unpack_from(fmt, view, _unpack_from=_unpack_from): # noqa - return _unpack_from(fmt, view.tobytes()) # <- memoryview - else: - # unpack_from supports memoryview in 2.7.6 and 3.3+ - unpack_from = _unpack_from # noqa - -except (ImportError, NameError): # pragma: no cover - - def __read__(fd, buf, size, read=os.read): # noqa - chunk = read(fd, size) - n = len(chunk) - if n != 0: - buf.write(chunk) - return n - readcanbuf = False # noqa - - def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa - return unpack(fmt, iobuf.getvalue()) # <-- BytesIO - - -logger = get_logger(__name__) -error, debug = logger.error, logger.debug - -UNAVAIL = frozenset([errno.EAGAIN, errno.EINTR]) - -#: Constant sent by child process when started (ready to accept work) -WORKER_UP = 15 - -#: A process must have started before this timeout (in secs.) expires. 
-PROC_ALIVE_TIMEOUT = 4.0 - -SCHED_STRATEGY_PREFETCH = 1 -SCHED_STRATEGY_FAIR = 4 - -SCHED_STRATEGIES = { - None: SCHED_STRATEGY_PREFETCH, - 'fair': SCHED_STRATEGY_FAIR, -} - -RESULT_MAXLEN = 128 - -Ack = namedtuple('Ack', ('id', 'fd', 'payload')) - - -def gen_not_started(gen): - # gi_frame is None when generator stopped. - return gen.gi_frame and gen.gi_frame.f_lasti == -1 - - -def _get_job_writer(job): - try: - writer = job._writer - except AttributeError: - pass - else: - return writer() # is a weakref - - -def _select(readers=None, writers=None, err=None, timeout=0): - """Simple wrapper to :class:`~select.select`. - - :param readers: Set of reader fds to test if readable. - :param writers: Set of writer fds to test if writable. - :param err: Set of fds to test for error condition. - - All fd sets passed must be mutable as this function - will remove non-working fds from them, this also means - the caller must make sure there are still fds in the sets - before calling us again. - - :returns: tuple of ``(readable, writable, again)``, where - ``readable`` is a set of fds that have data available for read, - ``writable`` is a set of fds that is ready to be written to - and ``again`` is a flag that if set means the caller must - throw away the result and call us again. - - """ - readers = set() if readers is None else readers - writers = set() if writers is None else writers - err = set() if err is None else err - try: - r, w, e = select.select(readers, writers, err, timeout) - if e: - r = list(set(r) | set(e)) - return r, w, 0 - except (select.error, socket.error) as exc: - if get_errno(exc) == errno.EINTR: - return [], [], 1 - elif get_errno(exc) in SELECT_BAD_FD: - for fd in readers | writers | err: - try: - select.select([fd], [], [], 0) - except (select.error, socket.error) as exc: - if get_errno(exc) not in SELECT_BAD_FD: - raise - readers.discard(fd) - writers.discard(fd) - err.discard(fd) - return [], [], 1 - else: - raise - - -def _repr_result(obj): - try: - return repr(obj) - except Exception as orig_exc: - try: - return text_t(obj) - except UnicodeDecodeError: - if isinstance(obj, string_t): - try: - return obj.decode('utf-8', errors='replace') - except Exception: - pass - return ''.format( - orig_exc, - ) - - -class Worker(_pool.Worker): - """Pool worker process.""" - dead = False - - def on_loop_start(self, pid): - # our version sends a WORKER_UP message when the process is ready - # to accept work, this will tell the parent that the inqueue fd - # is writable. 
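-        # The parent handles WORKER_UP in ResultHandler.on_process_alive,
-        # registered as a state handler in ResultHandler.__init__ below.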
- self.outq.put((WORKER_UP, (pid, ))) - - def prepare_result(self, result, maxlen=RESULT_MAXLEN, truncate=truncate): - if not isinstance(result, ExceptionInfo): - return truncate(_repr_result(result), maxlen) - return result - - -class ResultHandler(_pool.ResultHandler): - """Handles messages from the pool processes.""" - - def __init__(self, *args, **kwargs): - self.fileno_to_outq = kwargs.pop('fileno_to_outq') - self.on_process_alive = kwargs.pop('on_process_alive') - super(ResultHandler, self).__init__(*args, **kwargs) - # add our custom message handler - self.state_handlers[WORKER_UP] = self.on_process_alive - - def _recv_message(self, add_reader, fd, callback, - __read__=__read__, readcanbuf=readcanbuf, - BytesIO=BytesIO, unpack_from=unpack_from, - load=_pickle.load): - Hr = Br = 0 - if readcanbuf: - buf = bytearray(4) - bufv = memoryview(buf) - else: - buf = bufv = BytesIO() - # header - - while Hr < 4: - try: - n = __read__( - fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, - ) - except OSError as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - else: - if n == 0: - raise (OSError('End of file during message') if Hr - else EOFError()) - Hr += n - - body_size, = unpack_from('>i', bufv) - if readcanbuf: - buf = bytearray(body_size) - bufv = memoryview(buf) - else: - buf = bufv = BytesIO() - - while Br < body_size: - try: - n = __read__( - fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, - ) - except OSError as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - else: - if n == 0: - raise (OSError('End of file during message') if Br - else EOFError()) - Br += n - add_reader(fd, self.handle_event, fd) - if readcanbuf: - message = load(BytesIO(bufv)) - else: - bufv.seek(0) - message = load(bufv) - if message: - callback(message) - - def _make_process_result(self, hub): - """Coroutine that reads messages from the pool processes - and calls the appropriate handler.""" - fileno_to_outq = self.fileno_to_outq - on_state_change = self.on_state_change - add_reader = hub.add_reader - remove_reader = hub.remove_reader - recv_message = self._recv_message - - def on_result_readable(fileno): - try: - fileno_to_outq[fileno] - except KeyError: # process gone - return remove_reader(fileno) - it = recv_message(add_reader, fileno, on_state_change) - try: - next(it) - except StopIteration: - pass - except (IOError, OSError, EOFError): - remove_reader(fileno) - else: - add_reader(fileno, it) - return on_result_readable - - def register_with_event_loop(self, hub): - self.handle_event = self._make_process_result(hub) - - def handle_event(self, fileno): - raise RuntimeError('Not registered with event loop') - - def on_stop_not_started(self): - """This method is always used to stop when the helper thread is not - started.""" - cache = self.cache - check_timeouts = self.check_timeouts - fileno_to_outq = self.fileno_to_outq - on_state_change = self.on_state_change - join_exited_workers = self.join_exited_workers - - # flush the processes outqueues until they have all terminated. - outqueues = set(fileno_to_outq) - while cache and outqueues and self._state != TERMINATE: - if check_timeouts is not None: - # make sure tasks with a time limit will time out. 
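-                # (check_timeouts may be None when no time limits are
-                # configured, hence the guard above.)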
- check_timeouts() - # cannot iterate and remove at the same time - pending_remove_fd = set() - for fd in outqueues: - self._flush_outqueue( - fd, pending_remove_fd.discard, fileno_to_outq, - on_state_change, - ) - try: - join_exited_workers(shutdown=True) - except WorkersJoined: - return debug('result handler: all workers terminated') - outqueues.difference_update(pending_remove_fd) - - def _flush_outqueue(self, fd, remove, process_index, on_state_change): - try: - proc = process_index[fd] - except KeyError: - # process already found terminated - # which means its outqueue has already been processed - # by the worker lost handler. - return remove(fd) - - reader = proc.outq._reader - try: - setblocking(reader, 1) - except (OSError, IOError): - return remove(fd) - try: - if reader.poll(0): - task = reader.recv() - else: - task = None - sleep(0.5) - except (IOError, EOFError): - return remove(fd) - else: - if task: - on_state_change(task) - finally: - try: - setblocking(reader, 0) - except (OSError, IOError): - return remove(fd) - - -class AsynPool(_pool.Pool): - """Pool version that uses AIO instead of helper threads.""" - ResultHandler = ResultHandler - Worker = Worker - - def __init__(self, processes=None, synack=False, - sched_strategy=None, *args, **kwargs): - self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, - sched_strategy) - processes = self.cpu_count() if processes is None else processes - self.synack = synack - # create queue-pairs for all our processes in advance. - self._queues = dict((self.create_process_queues(), None) - for _ in range(processes)) - - # inqueue fileno -> process mapping - self._fileno_to_inq = {} - # outqueue fileno -> process mapping - self._fileno_to_outq = {} - # synqueue fileno -> process mapping - self._fileno_to_synq = {} - - # We keep track of processes that have not yet - # sent a WORKER_UP message. If a process fails to send - # this message within proc_up_timeout we terminate it - # and hope the next process will recover. - self._proc_alive_timeout = PROC_ALIVE_TIMEOUT - self._waiting_to_start = set() - - # denormalized set of all inqueues. - self._all_inqueues = set() - - # Set of fds being written to (busy) - self._active_writes = set() - - # Set of active co-routines currently writing jobs. - self._active_writers = set() - - # Set of fds that are busy (executing task) - self._busy_workers = set() - self._mark_worker_as_available = self._busy_workers.discard - - # Holds jobs waiting to be written to child processes. - self.outbound_buffer = deque() - - self.write_stats = Counter() - - super(AsynPool, self).__init__(processes, *args, **kwargs) - - for proc in self._pool: - # create initial mappings, these will be updated - # as processes are recycled, or found lost elsewhere. - self._fileno_to_outq[proc.outqR_fd] = proc - self._fileno_to_synq[proc.synqW_fd] = proc - self.on_soft_timeout = self.on_hard_timeout = None - if self._timeout_handler: - self.on_soft_timeout = self._timeout_handler.on_soft_timeout - self.on_hard_timeout = self._timeout_handler.on_hard_timeout - - def _create_worker_process(self, i): - gc.collect() # Issue #2927 - return super(AsynPool, self)._create_worker_process(i) - - def _event_process_exit(self, hub, proc): - # This method is called whenever the process sentinel is readable. 
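-        # A readable sentinel means the child exited: stop tracking it
-        # and let maintain_pool() clean up and replace lost workers.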
- self._untrack_child_process(proc, hub) - self.maintain_pool() - - def _track_child_process(self, proc, hub): - try: - fd = proc._sentinel_poll - except AttributeError: - # we need to duplicate the fd here to carefully - # control when the fd is removed from the process table, - # as once the original fd is closed we cannot unregister - # the fd from epoll(7) anymore, causing a 100% CPU poll loop. - fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) - hub.add_reader(fd, self._event_process_exit, hub, proc) - - def _untrack_child_process(self, proc, hub): - if proc._sentinel_poll is not None: - fd, proc._sentinel_poll = proc._sentinel_poll, None - hub.remove(fd) - os.close(fd) - - def register_with_event_loop(self, hub): - """Registers the async pool with the current event loop.""" - self._result_handler.register_with_event_loop(hub) - self.handle_result_event = self._result_handler.handle_event - self._create_timelimit_handlers(hub) - self._create_process_handlers(hub) - self._create_write_handlers(hub) - - # Add handler for when a process exits (calls maintain_pool) - [self._track_child_process(w, hub) for w in self._pool] - # Handle_result_event is called whenever one of the - # result queues are readable. - [hub.add_reader(fd, self.handle_result_event, fd) - for fd in self._fileno_to_outq] - - # Timers include calling maintain_pool at a regular interval - # to be certain processes are restarted. - for handler, interval in items(self.timers): - hub.call_repeatedly(interval, handler) - - hub.on_tick.add(self.on_poll_start) - - def _create_timelimit_handlers(self, hub, now=time.time): - """For async pool this sets up the handlers used - to implement time limits.""" - call_later = hub.call_later - trefs = self._tref_for_id = WeakValueDictionary() - - def on_timeout_set(R, soft, hard): - if soft: - trefs[R._job] = call_later( - soft, self._on_soft_timeout, R._job, soft, hard, hub, - ) - elif hard: - trefs[R._job] = call_later( - hard, self._on_hard_timeout, R._job, - ) - self.on_timeout_set = on_timeout_set - - def _discard_tref(job): - try: - tref = trefs.pop(job) - tref.cancel() - del(tref) - except (KeyError, AttributeError): - pass # out of scope - self._discard_tref = _discard_tref - - def on_timeout_cancel(R): - _discard_tref(R._job) - self.on_timeout_cancel = on_timeout_cancel - - def _on_soft_timeout(self, job, soft, hard, hub, now=time.time): - # only used by async pool. - if hard: - self._tref_for_id[job] = hub.call_at( - now() + (hard - soft), self._on_hard_timeout, job, - ) - try: - result = self._cache[job] - except KeyError: - pass # job ready - else: - self.on_soft_timeout(result) - finally: - if not hard: - # remove tref - self._discard_tref(job) - - def _on_hard_timeout(self, job): - # only used by async pool. 
- try: - result = self._cache[job] - except KeyError: - pass # job ready - else: - self.on_hard_timeout(result) - finally: - # remove tref - self._discard_tref(job) - - def on_job_ready(self, job, i, obj, inqW_fd): - self._mark_worker_as_available(inqW_fd) - - def _create_process_handlers(self, hub, READ=READ, ERR=ERR): - """For async pool this will create the handlers called - when a process is up/down and etc.""" - add_reader, remove_reader, remove_writer = ( - hub.add_reader, hub.remove_reader, hub.remove_writer, - ) - cache = self._cache - all_inqueues = self._all_inqueues - fileno_to_inq = self._fileno_to_inq - fileno_to_outq = self._fileno_to_outq - fileno_to_synq = self._fileno_to_synq - busy_workers = self._busy_workers - handle_result_event = self.handle_result_event - process_flush_queues = self.process_flush_queues - waiting_to_start = self._waiting_to_start - - def verify_process_alive(proc): - proc = proc() # is a weakref - if (proc is not None and proc._is_alive() and - proc in waiting_to_start): - assert proc.outqR_fd in fileno_to_outq - assert fileno_to_outq[proc.outqR_fd] is proc - assert proc.outqR_fd in hub.readers - error('Timed out waiting for UP message from %r', proc) - os.kill(proc.pid, 9) - - def on_process_up(proc): - """Called when a process has started.""" - # If we got the same fd as a previous process then we will also - # receive jobs in the old buffer, so we need to reset the - # job._write_to and job._scheduled_for attributes used to recover - # message boundaries when processes exit. - infd = proc.inqW_fd - for job in values(cache): - if job._write_to and job._write_to.inqW_fd == infd: - job._write_to = proc - if job._scheduled_for and job._scheduled_for.inqW_fd == infd: - job._scheduled_for = proc - fileno_to_outq[proc.outqR_fd] = proc - - # maintain_pool is called whenever a process exits. - self._track_child_process(proc, hub) - - assert not isblocking(proc.outq._reader) - - # handle_result_event is called when the processes outqueue is - # readable. - add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) - - waiting_to_start.add(proc) - hub.call_later( - self._proc_alive_timeout, verify_process_alive, ref(proc), - ) - - self.on_process_up = on_process_up - - def _remove_from_index(obj, proc, index, remove_fun, callback=None): - # this remove the file descriptors for a process from - # the indices. we have to make sure we don't overwrite - # another processes fds, as the fds may be reused. - try: - fd = obj.fileno() - except (IOError, OSError): - return - - try: - if index[fd] is proc: - # fd has not been reused so we can remove it from index. 
- index.pop(fd, None) - except KeyError: - pass - else: - remove_fun(fd) - if callback is not None: - callback(fd) - return fd - - def on_process_down(proc): - """Called when a worker process exits.""" - if getattr(proc, 'dead', None): - return - process_flush_queues(proc) - _remove_from_index( - proc.outq._reader, proc, fileno_to_outq, remove_reader, - ) - if proc.synq: - _remove_from_index( - proc.synq._writer, proc, fileno_to_synq, remove_writer, - ) - inq = _remove_from_index( - proc.inq._writer, proc, fileno_to_inq, remove_writer, - callback=all_inqueues.discard, - ) - if inq: - busy_workers.discard(inq) - self._untrack_child_process(proc, hub) - waiting_to_start.discard(proc) - self._active_writes.discard(proc.inqW_fd) - remove_writer(proc.inq._writer) - remove_reader(proc.outq._reader) - if proc.synqR_fd: - remove_reader(proc.synq._reader) - if proc.synqW_fd: - self._active_writes.discard(proc.synqW_fd) - remove_reader(proc.synq._writer) - self.on_process_down = on_process_down - - def _create_write_handlers(self, hub, - pack=struct.pack, dumps=_pickle.dumps, - protocol=HIGHEST_PROTOCOL): - """For async pool this creates the handlers used to write data to - child processes.""" - fileno_to_inq = self._fileno_to_inq - fileno_to_synq = self._fileno_to_synq - outbound = self.outbound_buffer - pop_message = outbound.popleft - append_message = outbound.append - put_back_message = outbound.appendleft - all_inqueues = self._all_inqueues - active_writes = self._active_writes - active_writers = self._active_writers - busy_workers = self._busy_workers - diff = all_inqueues.difference - add_writer = hub.add_writer - hub_add, hub_remove = hub.add, hub.remove - mark_write_fd_as_active = active_writes.add - mark_write_gen_as_active = active_writers.add - mark_worker_as_busy = busy_workers.add - write_generator_done = active_writers.discard - get_job = self._cache.__getitem__ - write_stats = self.write_stats - is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR - revoked_tasks = worker_state.revoked - getpid = os.getpid - - precalc = {ACK: self._create_payload(ACK, (0, )), - NACK: self._create_payload(NACK, (0, ))} - - def _put_back(job, _time=time.time): - # puts back at the end of the queue - if job._terminated is not None or \ - job.correlation_id in revoked_tasks: - if not job._accepted: - job._ack(None, _time(), getpid(), None) - job._set_terminated(job._terminated) - else: - # XXX linear lookup, should find a better way, - # but this happens rarely and is here to protect against races. - if job not in outbound: - outbound.appendleft(job) - self._put_back = _put_back - - # called for every event loop iteration, and if there - # are messages pending this will schedule writing one message - # by registering the 'schedule_writes' function for all currently - # inactive inqueues (not already being written to) - - # consolidate means the event loop will merge them - # and call the callback once with the list writable fds as - # argument. Using this means we minimize the risk of having - # the same fd receive every task if the pipe read buffer is not - # full. 
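# The messages scheduled here use a tiny framing protocol: ``send_job``
# (below) writes a 4-byte big-endian length header followed by a pickled
# body, and ``_recv_message`` (above) reads it back the same way.  A
# self-contained sketch of that wire format over a plain socket; the
# function names are illustrative, not billiard's actual API:
import pickle
import struct


def send_message(sock, obj):
    # 4-byte big-endian size header, then the pickled payload.
    body = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
    sock.sendall(struct.pack('>I', len(body)) + body)


def recv_message(sock):
    (size,) = struct.unpack('>I', _recv_exact(sock, 4))
    return pickle.loads(_recv_exact(sock, size))


def _recv_exact(sock, n):
    # loop until exactly n bytes arrive; short reads are normal on sockets.
    buf = bytearray()
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError('connection closed mid-message')
        buf.extend(chunk)
    return bytes(buf)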
- if is_fair_strategy: - - def on_poll_start(): - if outbound and len(busy_workers) < len(all_inqueues): - inactive = diff(active_writes) - [hub_add(fd, None, WRITE | ERR, consolidate=True) - for fd in inactive] - else: - [hub_remove(fd) for fd in diff(active_writes)] - else: - def on_poll_start(): # noqa - if outbound: - [hub_add(fd, None, WRITE | ERR, consolidate=True) - for fd in diff(active_writes)] - else: - [hub_remove(fd) for fd in diff(active_writes)] - self.on_poll_start = on_poll_start - - def on_inqueue_close(fd, proc): - # Makes sure the fd is removed from tracking when - # the connection is closed, this is essential as fds may be reused. - busy_workers.discard(fd) - try: - if fileno_to_inq[fd] is proc: - fileno_to_inq.pop(fd, None) - active_writes.discard(fd) - all_inqueues.discard(fd) - hub_remove(fd) - except KeyError: - pass - self.on_inqueue_close = on_inqueue_close - - def schedule_writes(ready_fds, curindex=[0]): - # Schedule write operation to ready file descriptor. - # The file descriptor is writeable, but that does not - # mean the process is currently reading from the socket. - # The socket is buffered so writeable simply means that - # the buffer can accept at least 1 byte of data. - - # This means we have to cycle between the ready fds. - # the first version used shuffle, but using i % total - # is about 30% faster with many processes. The latter - # also shows more fairness in write stats when used with - # many processes [XXX On OS X, this may vary depending - # on event loop implementation (i.e select vs epoll), so - # have to test further] - total = len(ready_fds) - - for i in range(total): - ready_fd = ready_fds[curindex[0] % total] - if ready_fd in active_writes: - # already writing to this fd - curindex[0] += 1 - continue - if is_fair_strategy and ready_fd in busy_workers: - # worker is already busy with another task - curindex[0] += 1 - continue - if ready_fd not in all_inqueues: - hub_remove(ready_fd) - curindex[0] += 1 - continue - try: - job = pop_message() - except IndexError: - # no more messages, remove all inactive fds from the hub. - # this is important since the fds are always writeable - # as long as there's 1 byte left in the buffer, and so - # this may create a spinloop where the event loop - # always wakes up. - for inqfd in diff(active_writes): - hub_remove(inqfd) - break - else: - if not job._accepted: # job not accepted by another worker - try: - # keep track of what process the write operation - # was scheduled for. - proc = job._scheduled_for = fileno_to_inq[ready_fd] - except KeyError: - # write was scheduled for this fd but the process - # has since exited and the message must be sent to - # another process. - put_back_message(job) - curindex[0] += 1 - continue - cor = _write_job(proc, ready_fd, job) - job._writer = ref(cor) - mark_write_gen_as_active(cor) - mark_write_fd_as_active(ready_fd) - mark_worker_as_busy(ready_fd) - - # Try to write immediately, in case there's an error. - try: - next(cor) - except StopIteration: - pass - except OSError as exc: - if get_errno(exc) != errno.EBADF: - raise - else: - add_writer(ready_fd, cor) - curindex[0] += 1 - hub.consolidate_callback = schedule_writes - - def send_job(tup): - # Schedule writing job request for when one of the process - # inqueues are writable. - body = dumps(tup, protocol=protocol) - body_size = len(body) - header = pack('>I', body_size) - # index 1,0 is the job ID. 
- job = get_job(tup[1][0]) - job._payload = buf_t(header), buf_t(body), body_size - append_message(job) - self._quick_put = send_job - - def on_not_recovering(proc, fd, job, exc): - error('Process inqueue damaged: %r %r: %r', - proc, proc.exitcode, exc, exc_info=1) - if proc._is_alive(): - proc.terminate() - hub.remove(fd) - self._put_back(job) - - def _write_job(proc, fd, job): - # writes job to the worker process. - # Operation must complete if more than one byte of data - # was written. If the broker connection is lost - # and no data was written the operation shall be canceled. - header, body, body_size = job._payload - errors = 0 - try: - # job result keeps track of what process the job is sent to. - job._write_to = proc - send = proc.send_job_offset - - Hw = Bw = 0 - # write header - while Hw < 4: - try: - Hw += send(header, Hw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - errors += 1 - if errors > 100: - on_not_recovering(proc, fd, job, exc) - raise StopIteration() - yield - else: - errors = 0 - - # write body - while Bw < body_size: - try: - Bw += send(body, Bw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - errors += 1 - if errors > 100: - on_not_recovering(proc, fd, job, exc) - raise StopIteration() - yield - else: - errors = 0 - finally: - hub_remove(fd) - write_stats[proc.index] += 1 - # message written, so this fd is now available - active_writes.discard(fd) - write_generator_done(job._writer()) # is a weakref - - def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR): - # Only used when synack is enabled. - # Schedule writing ack response for when the fd is writeable. - msg = Ack(job, fd, precalc[response]) - callback = promise(write_generator_done) - cor = _write_ack(fd, msg, callback=callback) - mark_write_gen_as_active(cor) - mark_write_fd_as_active(fd) - callback.args = (cor, ) - add_writer(fd, cor) - self.send_ack = send_ack - - def _write_ack(fd, ack, callback=None): - # writes ack back to the worker if synack enabled. - # this operation *MUST* complete, otherwise - # the worker process will hang waiting for the ack. - header, body, body_size = ack[2] - try: - try: - proc = fileno_to_synq[fd] - except KeyError: - # process died, we can safely discard the ack at this - # point. - raise StopIteration() - send = proc.send_syn_offset - - Hw = Bw = 0 - # write header - while Hw < 4: - try: - Hw += send(header, Hw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - yield - - # write body - while Bw < body_size: - try: - Bw += send(body, Bw) - except Exception as exc: - if get_errno(exc) not in UNAVAIL: - raise - # suspend until more data - yield - finally: - if callback: - callback() - # message written, so this fd is now available - active_writes.discard(fd) - - def flush(self): - if self._state == TERMINATE: - return - # cancel all tasks that have not been accepted so that NACK is sent. - for job in values(self._cache): - if not job._accepted: - job._cancel() - - # clear the outgoing buffer as the tasks will be redelivered by - # the broker anyway. - if self.outbound_buffer: - self.outbound_buffer.clear() - - self.maintain_pool() - - try: - # ...but we must continue writing the payloads we already started - # to keep message boundaries. - # The messages may be NACK'ed later if synack is enabled. 
- if self._state == RUN: - # flush outgoing buffers - intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) - owned_by = {} - for job in values(self._cache): - writer = _get_job_writer(job) - if writer is not None: - owned_by[writer] = job - - while self._active_writers: - writers = list(self._active_writers) - for gen in writers: - if (gen.__name__ == '_write_job' and - gen_not_started(gen)): - # has not started writing the job so can - # discard the task, but we must also remove - # it from the Pool._cache. - try: - job = owned_by[gen] - except KeyError: - pass - else: - # removes from Pool._cache - job.discard() - self._active_writers.discard(gen) - else: - try: - job = owned_by[gen] - except KeyError: - pass - else: - job_proc = job._write_to - if job_proc._is_alive(): - self._flush_writer(job_proc, gen) - # workers may have exited in the meantime. - self.maintain_pool() - sleep(next(intervals)) # don't busyloop - finally: - self.outbound_buffer.clear() - self._active_writers.clear() - self._active_writes.clear() - self._busy_workers.clear() - - def _flush_writer(self, proc, writer): - fds = set([proc.inq._writer]) - try: - while fds: - if not proc._is_alive(): - break # process exited - readable, writable, again = _select( - writers=fds, err=fds, timeout=0.5, - ) - if not again and (writable or readable): - try: - next(writer) - except (StopIteration, OSError, IOError, EOFError): - break - finally: - self._active_writers.discard(writer) - - def get_process_queues(self): - """Get queues for a new process. - - Here we will find an unused slot, as there should always - be one available when we start a new process. - """ - return next(q for q, owner in items(self._queues) - if owner is None) - - def on_grow(self, n): - """Grow the pool by ``n`` proceses.""" - diff = max(self._processes - len(self._queues), 0) - if diff: - self._queues.update( - dict((self.create_process_queues(), None) for _ in range(diff)) - ) - - def on_shrink(self, n): - """Shrink the pool by ``n`` processes.""" - pass - - def create_process_queues(self): - """Creates new in, out (and optionally syn) queues, - returned as a tuple.""" - # NOTE: Pipes must be set O_NONBLOCK at creation time (the original - # fd), otherwise it will not be possible to change the flags until - # there is an actual reader/writer on the other side. 
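# What the NOTE above means in practice: the O_NONBLOCK flag is flipped
# on the raw pipe fd immediately after creation, before the fd is ever
# handed to a child process.  A rough standalone equivalent of the
# wnonblock/rnonblock options (illustrative, not billiard's API):
import fcntl
import os


def pipe_with_flags(rnonblock=False, wnonblock=False):
    r, w = os.pipe()
    if rnonblock:
        _set_nonblock(r)
    if wnonblock:
        _set_nonblock(w)
    return r, w


def _set_nonblock(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)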
- inq = _SimpleQueue(wnonblock=True) - outq = _SimpleQueue(rnonblock=True) - synq = None - assert isblocking(inq._reader) - assert not isblocking(inq._writer) - assert not isblocking(outq._reader) - assert isblocking(outq._writer) - if self.synack: - synq = _SimpleQueue(wnonblock=True) - assert isblocking(synq._reader) - assert not isblocking(synq._writer) - return inq, outq, synq - - def on_process_alive(self, pid): - """Handler called when the :const:`WORKER_UP` message is received - from a child process, which marks the process as ready - to receive work.""" - try: - proc = next(w for w in self._pool if w.pid == pid) - except StopIteration: - return logger.warning('process with pid=%s already exited', pid) - assert proc.inqW_fd not in self._fileno_to_inq - assert proc.inqW_fd not in self._all_inqueues - self._waiting_to_start.discard(proc) - self._fileno_to_inq[proc.inqW_fd] = proc - self._fileno_to_synq[proc.synqW_fd] = proc - self._all_inqueues.add(proc.inqW_fd) - - def on_job_process_down(self, job, pid_gone): - """Handler called for each job when the process it was assigned to - exits.""" - if job._write_to and not job._write_to._is_alive(): - # job was partially written - self.on_partial_read(job, job._write_to) - elif job._scheduled_for and not job._scheduled_for._is_alive(): - # job was only scheduled to be written to this process, - # but no data was sent so put it back on the outbound_buffer. - self._put_back(job) - - def on_job_process_lost(self, job, pid, exitcode): - """Handler called for each *started* job when the process it - was assigned to exited by mysterious means (error exitcodes and - signals)""" - self.mark_as_worker_lost(job, exitcode) - - def human_write_stats(self): - if self.write_stats is None: - return 'N/A' - vals = list(values(self.write_stats)) - total = sum(vals) - - def per(v, total): - return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0) - - return { - 'total': total, - 'avg': per(total / len(self.write_stats) if total else 0, total), - 'all': ', '.join(per(v, total) for v in vals), - 'raw': ', '.join(map(str, vals)), - 'inqueues': { - 'total': len(self._all_inqueues), - 'active': len(self._active_writes), - } - } - - def _process_cleanup_queues(self, proc): - """Handler called to clean up a processes queues after process - exit.""" - if not proc.dead: - try: - self._queues[self._find_worker_queues(proc)] = None - except (KeyError, ValueError): - pass - - @staticmethod - def _stop_task_handler(task_handler): - """Called at shutdown to tell processes that we are shutting down.""" - for proc in task_handler.pool: - try: - setblocking(proc.inq._writer, 1) - except (OSError, IOError): - pass - else: - try: - proc.inq.put(None) - except OSError as exc: - if get_errno(exc) != errno.EBADF: - raise - - def create_result_handler(self): - return super(AsynPool, self).create_result_handler( - fileno_to_outq=self._fileno_to_outq, - on_process_alive=self.on_process_alive, - ) - - def _process_register_queues(self, proc, queues): - """Marks new ownership for ``queues`` so that the fileno indices are - updated.""" - assert queues in self._queues - b = len(self._queues) - self._queues[queues] = proc - assert b == len(self._queues) - - def _find_worker_queues(self, proc): - """Find the queues owned by ``proc``.""" - try: - return next(q for q, owner in items(self._queues) - if owner == proc) - except StopIteration: - raise ValueError(proc) - - def _setup_queues(self): - # this is only used by the original pool which uses a shared - # queue for all processes. 
-
-        # these attributes make no sense for us, but we will still
-        # have to initialize them.
-        self._inqueue = self._outqueue = \
-            self._quick_put = self._quick_get = self._poll_result = None
-
-    def process_flush_queues(self, proc):
-        """Flushes all queues, including the outbound buffer, so that
-        all tasks that have not been started will be discarded.
-
-        In Celery this is called whenever the transport connection is lost
-        (consumer restart).
-
-        """
-        resq = proc.outq._reader
-        on_state_change = self._result_handler.on_state_change
-        fds = set([resq])
-        while fds and not resq.closed and self._state != TERMINATE:
-            readable, _, again = _select(fds, None, fds, timeout=0.01)
-            if readable:
-                try:
-                    task = resq.recv()
-                except (OSError, IOError, EOFError) as exc:
-                    if get_errno(exc) == errno.EINTR:
-                        continue
-                    elif get_errno(exc) == errno.EAGAIN:
-                        break
-                    else:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                    if get_errno(exc) not in UNAVAIL:
-                        debug('got %r while flushing process %r',
-                              exc, proc, exc_info=1)
-                        break
-                else:
-                    if task is None:
-                        debug('got sentinel while flushing process %r', proc)
-                        break
-                    else:
-                        on_state_change(task)
-            else:
-                break
-
-    def on_partial_read(self, job, proc):
-        """Called when a job was only partially written to a child process
-        and it exited."""
-        # worker terminated by signal:
-        # we cannot reuse the sockets again, because we don't know if
-        # the process wrote/read anything from them, and if so we cannot
-        # restore the message boundaries.
-        if not job._accepted:
-            # job was not acked, so find another worker to send it to.
-            self._put_back(job)
-        writer = _get_job_writer(job)
-        if writer:
-            self._active_writers.discard(writer)
-            del(writer)
-
-        if not proc.dead:
-            proc.dead = True
-            # Replace queues to avoid reuse
-            before = len(self._queues)
-            try:
-                queues = self._find_worker_queues(proc)
-                if self.destroy_queues(queues, proc):
-                    self._queues[self.create_process_queues()] = None
-            except ValueError:
-                pass
-            assert len(self._queues) == before
-
-    def destroy_queues(self, queues, proc):
-        """Destroy queues that can no longer be used, so that they
-        can be replaced by new sockets."""
-        assert not proc._is_alive()
-        self._waiting_to_start.discard(proc)
-        removed = 1
-        try:
-            self._queues.pop(queues)
-        except KeyError:
-            removed = 0
-        try:
-            self.on_inqueue_close(queues[0]._writer.fileno(), proc)
-        except IOError:
-            pass
-        for queue in queues:
-            if queue:
-                for sock in (queue._reader, queue._writer):
-                    if not sock.closed:
-                        try:
-                            sock.close()
-                        except (IOError, OSError):
-                            pass
-        return removed
-
-    def _create_payload(self, type_, args,
-                        dumps=_pickle.dumps, pack=struct.pack,
-                        protocol=HIGHEST_PROTOCOL):
-        body = dumps((type_, args), protocol=protocol)
-        size = len(body)
-        header = pack('>I', size)
-        return header, body, size
-
-    @classmethod
-    def _set_result_sentinel(cls, _outqueue, _pool):
-        # unused
-        pass
-
-    def _help_stuff_finish_args(self):
-        # Pool._help_stuff_finish is a classmethod so we have to use this
-        # trick to modify the arguments passed to it.
- return (self._pool, ) - - @classmethod - def _help_stuff_finish(cls, pool): - debug( - 'removing tasks from inqueue until task handler finished', - ) - fileno_to_proc = {} - inqR = set() - for w in pool: - try: - fd = w.inq._reader.fileno() - inqR.add(fd) - fileno_to_proc[fd] = w - except IOError: - pass - while inqR: - readable, _, again = _select(inqR, timeout=0.5) - if again: - continue - if not readable: - break - for fd in readable: - fileno_to_proc[fd].inq._reader.recv() - sleep(0) - - @property - def timers(self): - return {self.maintain_pool: 5.0} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py deleted file mode 100644 index 29c348d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.base - ~~~~~~~~~~~~~~~~~~~~~~~ - - TaskPool interface. - -""" -from __future__ import absolute_import - -import logging -import os -import sys - -from billiard.einfo import ExceptionInfo -from billiard.exceptions import WorkerLostError -from kombu.utils.encoding import safe_repr - -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.five import monotonic, reraise -from celery.utils import timer2 -from celery.utils.text import truncate -from celery.utils.log import get_logger - -__all__ = ['BasePool', 'apply_target'] - -logger = get_logger('celery.pool') - - -def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None, getpid=os.getpid, - propagate=(), monotonic=monotonic, **_): - if accept_callback: - accept_callback(pid or getpid(), monotonic()) - try: - ret = target(*args, **kwargs) - except propagate: - raise - except Exception: - raise - except (WorkerShutdown, WorkerTerminate): - raise - except BaseException as exc: - try: - reraise(WorkerLostError, WorkerLostError(repr(exc)), - sys.exc_info()[2]) - except WorkerLostError: - callback(ExceptionInfo()) - else: - callback(ret) - - -class BasePool(object): - RUN = 0x1 - CLOSE = 0x2 - TERMINATE = 0x3 - - Timer = timer2.Timer - - #: set to true if the pool can be shutdown from within - #: a signal handler. - signal_safe = True - - #: set to true if pool uses greenlets. 
- is_green = False - - _state = None - _pool = None - - #: only used by multiprocessing pool - uses_semaphore = False - - task_join_will_block = True - - def __init__(self, limit=None, putlocks=True, - forking_enable=True, callbacks_propagate=(), **options): - self.limit = limit - self.putlocks = putlocks - self.options = options - self.forking_enable = forking_enable - self.callbacks_propagate = callbacks_propagate - self._does_debug = logger.isEnabledFor(logging.DEBUG) - - def on_start(self): - pass - - def did_start_ok(self): - return True - - def flush(self): - pass - - def on_stop(self): - pass - - def register_with_event_loop(self, loop): - pass - - def on_apply(self, *args, **kwargs): - pass - - def on_terminate(self): - pass - - def on_soft_timeout(self, job): - pass - - def on_hard_timeout(self, job): - pass - - def maintain_pool(self, *args, **kwargs): - pass - - def terminate_job(self, pid, signal=None): - raise NotImplementedError( - '{0} does not implement kill_job'.format(type(self))) - - def restart(self): - raise NotImplementedError( - '{0} does not implement restart'.format(type(self))) - - def stop(self): - self.on_stop() - self._state = self.TERMINATE - - def terminate(self): - self._state = self.TERMINATE - self.on_terminate() - - def start(self): - self.on_start() - self._state = self.RUN - - def close(self): - self._state = self.CLOSE - self.on_close() - - def on_close(self): - pass - - def apply_async(self, target, args=[], kwargs={}, **options): - """Equivalent of the :func:`apply` built-in function. - - Callbacks should optimally return as soon as possible since - otherwise the thread which handles the result will get blocked. - - """ - if self._does_debug: - logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', - target, truncate(safe_repr(args), 1024), - truncate(safe_repr(kwargs), 1024)) - - return self.on_apply(target, args, kwargs, - waitforslot=self.putlocks, - callbacks_propagate=self.callbacks_propagate, - **options) - - def _get_info(self): - return {} - - @property - def info(self): - return self._get_info() - - @property - def active(self): - return self._state == self.RUN - - @property - def num_processes(self): - return self.limit diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py deleted file mode 100644 index 3ae4549..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.eventlet - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Eventlet pool implementation. - -""" -from __future__ import absolute_import - -import sys - -from time import time - -__all__ = ['TaskPool'] - -W_RACE = """\ -Celery module with %s imported before eventlet patched\ -""" -RACE_MODS = ('billiard.', 'celery.', 'kombu.') - - -#: Warn if we couldn't patch early enough, -#: and thread/socket depending celery modules have already been loaded. -for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): - for side in ('thread', 'threading', 'socket'): # pragma: no cover - if getattr(mod, side, None): - import warnings - warnings.warn(RuntimeWarning(W_RACE % side)) - - -from celery import signals # noqa -from celery.utils import timer2 # noqa - -from . 
import base # noqa - - -def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, getpid=None): - return base.apply_target(target, args, kwargs, callback, accept_callback, - pid=getpid()) - - -class Schedule(timer2.Schedule): - - def __init__(self, *args, **kwargs): - from eventlet.greenthread import spawn_after - from greenlet import GreenletExit - super(Schedule, self).__init__(*args, **kwargs) - - self.GreenletExit = GreenletExit - self._spawn_after = spawn_after - self._queue = set() - - def _enter(self, eta, priority, entry): - secs = max(eta - time(), 0) - g = self._spawn_after(secs, entry) - self._queue.add(g) - g.link(self._entry_exit, entry) - g.entry = entry - g.eta = eta - g.priority = priority - g.canceled = False - return g - - def _entry_exit(self, g, entry): - try: - try: - g.wait() - except self.GreenletExit: - entry.cancel() - g.canceled = True - finally: - self._queue.discard(g) - - def clear(self): - queue = self._queue - while queue: - try: - queue.pop().cancel() - except (KeyError, self.GreenletExit): - pass - - @property - def queue(self): - return self._queue - - -class Timer(timer2.Timer): - Schedule = Schedule - - def ensure_started(self): - pass - - def stop(self): - self.schedule.clear() - - def cancel(self, tref): - try: - tref.cancel() - except self.schedule.GreenletExit: - pass - - def start(self): - pass - - -class TaskPool(base.BasePool): - Timer = Timer - - signal_safe = False - is_green = True - task_join_will_block = False - - def __init__(self, *args, **kwargs): - from eventlet import greenthread - from eventlet.greenpool import GreenPool - self.Pool = GreenPool - self.getcurrent = greenthread.getcurrent - self.getpid = lambda: id(greenthread.getcurrent()) - self.spawn_n = greenthread.spawn_n - - super(TaskPool, self).__init__(*args, **kwargs) - - def on_start(self): - self._pool = self.Pool(self.limit) - signals.eventlet_pool_started.send(sender=self) - self._quick_put = self._pool.spawn_n - self._quick_apply_sig = signals.eventlet_pool_apply.send - - def on_stop(self): - signals.eventlet_pool_preshutdown.send(sender=self) - if self._pool is not None: - self._pool.waitall() - signals.eventlet_pool_postshutdown.send(sender=self) - - def on_apply(self, target, args=None, kwargs=None, callback=None, - accept_callback=None, **_): - self._quick_apply_sig( - sender=self, target=target, args=args, kwargs=kwargs, - ) - self._quick_put(apply_target, target, args, kwargs, - callback, accept_callback, - self.getpid) - - def grow(self, n=1): - limit = self.limit + n - self._pool.resize(limit) - self.limit = limit - - def shrink(self, n=1): - limit = self.limit - n - self._pool.resize(limit) - self.limit = limit - - def _get_info(self): - return { - 'max-concurrency': self.limit, - 'free-threads': self._pool.free(), - 'running-threads': self._pool.running(), - } diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py deleted file mode 100644 index f567f57..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.gevent - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - gevent pool implementation. 
- -""" -from __future__ import absolute_import - -from time import time - -try: - from gevent import Timeout -except ImportError: # pragma: no cover - Timeout = None # noqa - -from celery.utils import timer2 - -from .base import apply_target, BasePool - -__all__ = ['TaskPool'] - - -def apply_timeout(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None, timeout=None, - timeout_callback=None, Timeout=Timeout, - apply_target=apply_target, **rest): - try: - with Timeout(timeout): - return apply_target(target, args, kwargs, callback, - accept_callback, pid, - propagate=(Timeout, ), **rest) - except Timeout: - return timeout_callback(False, timeout) - - -class Schedule(timer2.Schedule): - - def __init__(self, *args, **kwargs): - from gevent.greenlet import Greenlet, GreenletExit - - class _Greenlet(Greenlet): - cancel = Greenlet.kill - - self._Greenlet = _Greenlet - self._GreenletExit = GreenletExit - super(Schedule, self).__init__(*args, **kwargs) - self._queue = set() - - def _enter(self, eta, priority, entry): - secs = max(eta - time(), 0) - g = self._Greenlet.spawn_later(secs, entry) - self._queue.add(g) - g.link(self._entry_exit) - g.entry = entry - g.eta = eta - g.priority = priority - g.canceled = False - return g - - def _entry_exit(self, g): - try: - g.kill() - finally: - self._queue.discard(g) - - def clear(self): - queue = self._queue - while queue: - try: - queue.pop().kill() - except KeyError: - pass - - @property - def queue(self): - return self._queue - - -class Timer(timer2.Timer): - Schedule = Schedule - - def ensure_started(self): - pass - - def stop(self): - self.schedule.clear() - - def start(self): - pass - - -class TaskPool(BasePool): - Timer = Timer - - signal_safe = False - is_green = True - task_join_will_block = False - - def __init__(self, *args, **kwargs): - from gevent import spawn_raw - from gevent.pool import Pool - self.Pool = Pool - self.spawn_n = spawn_raw - self.timeout = kwargs.get('timeout') - super(TaskPool, self).__init__(*args, **kwargs) - - def on_start(self): - self._pool = self.Pool(self.limit) - self._quick_put = self._pool.spawn - - def on_stop(self): - if self._pool is not None: - self._pool.join() - - def on_apply(self, target, args=None, kwargs=None, callback=None, - accept_callback=None, timeout=None, - timeout_callback=None, **_): - timeout = self.timeout if timeout is None else timeout - return self._quick_put(apply_timeout if timeout else apply_target, - target, args, kwargs, callback, accept_callback, - timeout=timeout, - timeout_callback=timeout_callback) - - def grow(self, n=1): - self._pool._semaphore.counter += n - self._pool.size += n - - def shrink(self, n=1): - self._pool._semaphore.counter -= n - self._pool.size -= n - - @property - def num_processes(self): - return len(self._pool) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py deleted file mode 100644 index 1771f5c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.prefork - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Pool implementation using :mod:`multiprocessing`. 
- -""" -from __future__ import absolute_import - -import os - -from billiard import forking_enable -from billiard.pool import RUN, CLOSE, Pool as BlockingPool - -from celery import platforms -from celery import signals -from celery._state import set_default_app, _set_task_join_will_block -from celery.app import trace -from celery.concurrency.base import BasePool -from celery.five import items -from celery.utils.functional import noop -from celery.utils.log import get_logger - -from .asynpool import AsynPool - -__all__ = ['TaskPool', 'process_initializer', 'process_destructor'] - -#: List of signals to reset when a child process starts. -WORKER_SIGRESET = frozenset(['SIGTERM', - 'SIGHUP', - 'SIGTTIN', - 'SIGTTOU', - 'SIGUSR1']) - -#: List of signals to ignore when a child process starts. -WORKER_SIGIGNORE = frozenset(['SIGINT']) - -logger = get_logger(__name__) -warning, debug = logger.warning, logger.debug - - -def process_initializer(app, hostname): - """Pool child process initializer. - - This will initialize a child pool process to ensure the correct - app instance is used and things like - logging works. - - """ - _set_task_join_will_block(True) - platforms.signals.reset(*WORKER_SIGRESET) - platforms.signals.ignore(*WORKER_SIGIGNORE) - platforms.set_mp_process_title('celeryd', hostname=hostname) - # This is for Windows and other platforms not supporting - # fork(). Note that init_worker makes sure it's only - # run once per process. - app.loader.init_worker() - app.loader.init_worker_process() - logfile = os.environ.get('CELERY_LOG_FILE') or None - if logfile and '%i' in logfile.lower(): - # logfile path will differ so need to set up logging again. - app.log.already_setup = False - app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), - logfile, - bool(os.environ.get('CELERY_LOG_REDIRECT', False)), - str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), - hostname=hostname) - if os.environ.get('FORKED_BY_MULTIPROCESSING'): - # pool did execv after fork - trace.setup_worker_optimizations(app) - else: - app.set_current() - set_default_app(app) - app.finalize() - trace._tasks = app._tasks # enables fast_trace_task optimization. - # rebuild execution handler for all tasks. - from celery.app.trace import build_tracer - for name, task in items(app.tasks): - task.__trace__ = build_tracer(name, task, app.loader, hostname, - app=app) - from celery.worker import state as worker_state - worker_state.reset_state() - signals.worker_process_init.send(sender=None) - - -def process_destructor(pid, exitcode): - """Pool child process destructor - - Dispatch the :signal:`worker_process_shutdown` signal. - - """ - signals.worker_process_shutdown.send( - sender=None, pid=pid, exitcode=exitcode, - ) - - -class TaskPool(BasePool): - """Multiprocessing Pool implementation.""" - Pool = AsynPool - BlockingPool = BlockingPool - - uses_semaphore = True - write_stats = None - - def on_start(self): - """Run the task pool. - - Will pre-fork all workers so they're ready to accept tasks. 
- - """ - forking_enable(self.forking_enable) - Pool = (self.BlockingPool if self.options.get('threads', True) - else self.Pool) - P = self._pool = Pool(processes=self.limit, - initializer=process_initializer, - on_process_exit=process_destructor, - synack=False, - **self.options) - - # Create proxy methods - self.on_apply = P.apply_async - self.maintain_pool = P.maintain_pool - self.terminate_job = P.terminate_job - self.grow = P.grow - self.shrink = P.shrink - self.flush = getattr(P, 'flush', None) # FIXME add to billiard - - def restart(self): - self._pool.restart() - self._pool.apply_async(noop) - - def did_start_ok(self): - return self._pool.did_start_ok() - - def register_with_event_loop(self, loop): - try: - reg = self._pool.register_with_event_loop - except AttributeError: - return - return reg(loop) - - def on_stop(self): - """Gracefully stop the pool.""" - if self._pool is not None and self._pool._state in (RUN, CLOSE): - self._pool.close() - self._pool.join() - self._pool = None - - def on_terminate(self): - """Force terminate the pool.""" - if self._pool is not None: - self._pool.terminate() - self._pool = None - - def on_close(self): - if self._pool is not None and self._pool._state == RUN: - self._pool.close() - - def _get_info(self): - try: - write_stats = self._pool.human_write_stats - except AttributeError: - def write_stats(): - return 'N/A' # only supported by asynpool - return { - 'max-concurrency': self.limit, - 'processes': [p.pid for p in self._pool._pool], - 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', - 'put-guarded-by-semaphore': self.putlocks, - 'timeouts': (self._pool.soft_timeout or 0, - self._pool.timeout or 0), - 'writes': write_stats() - } - - @property - def num_processes(self): - return self._pool._processes diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py deleted file mode 100644 index a2dc199..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.solo - ~~~~~~~~~~~~~~~~~~~~~~~ - - Single-threaded pool implementation. - -""" -from __future__ import absolute_import - -import os - -from .base import BasePool, apply_target - -__all__ = ['TaskPool'] - - -class TaskPool(BasePool): - """Solo task pool (blocking, inline, fast).""" - - def __init__(self, *args, **kwargs): - super(TaskPool, self).__init__(*args, **kwargs) - self.on_apply = apply_target - - def _get_info(self): - return {'max-concurrency': 1, - 'processes': [os.getpid()], - 'max-tasks-per-child': None, - 'put-guarded-by-semaphore': True, - 'timeouts': ()} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py deleted file mode 100644 index fee901e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.threads - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Pool implementation using threads. 
- -""" -from __future__ import absolute_import - -from celery.five import UserDict - -from .base import apply_target, BasePool - -__all__ = ['TaskPool'] - - -class NullDict(UserDict): - - def __setitem__(self, key, value): - pass - - -class TaskPool(BasePool): - - def __init__(self, *args, **kwargs): - try: - import threadpool - except ImportError: - raise ImportError( - 'The threaded pool requires the threadpool module.') - self.WorkRequest = threadpool.WorkRequest - self.ThreadPool = threadpool.ThreadPool - super(TaskPool, self).__init__(*args, **kwargs) - - def on_start(self): - self._pool = self.ThreadPool(self.limit) - # threadpool stores all work requests until they are processed - # we don't need this dict, and it occupies way too much memory. - self._pool.workRequests = NullDict() - self._quick_put = self._pool.putRequest - self._quick_clear = self._pool._results_queue.queue.clear - - def on_stop(self): - self._pool.dismissWorkers(self.limit, do_join=True) - - def on_apply(self, target, args=None, kwargs=None, callback=None, - accept_callback=None, **_): - req = self.WorkRequest(apply_target, (target, args, kwargs, callback, - accept_callback)) - self._quick_put(req) - # threadpool also has callback support, - # but for some reason the callback is not triggered - # before you've collected the results. - # Clear the results (if any), so it doesn't grow too large. - self._quick_clear() - return req diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py deleted file mode 100644 index dcdc615..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py +++ /dev/null @@ -1,172 +0,0 @@ -# -*- coding: utf-8 -*- -""" -========================= -Abortable tasks overview -========================= - -For long-running :class:`Task`'s, it can be desirable to support -aborting during execution. Of course, these tasks should be built to -support abortion specifically. - -The :class:`AbortableTask` serves as a base class for all :class:`Task` -objects that should support abortion by producers. - -* Producers may invoke the :meth:`abort` method on - :class:`AbortableAsyncResult` instances, to request abortion. - -* Consumers (workers) should periodically check (and honor!) the - :meth:`is_aborted` method at controlled points in their task's - :meth:`run` method. The more often, the better. - -The necessary intermediate communication is dealt with by the -:class:`AbortableTask` implementation. - -Usage example -------------- - -In the consumer: - -.. code-block:: python - - from __future__ import absolute_import - - from celery.contrib.abortable import AbortableTask - from celery.utils.log import get_task_logger - - from proj.celery import app - - logger = get_logger(__name__) - - @app.task(bind=True, base=AbortableTask) - def long_running_task(self): - results = [] - for i in range(100): - # check after every 5 iterations... - # (or alternatively, check when some timer is due) - if not i % 5: - if self.is_aborted(): - # respect aborted state, and terminate gracefully. - logger.warning('Task aborted') - return - value = do_something_expensive(i) - results.append(y) - logger.info('Task complete') - return results - -In the producer: - -.. 
-.. code-block:: python
-
-    from __future__ import absolute_import
-
-    import time
-
-    from proj.tasks import long_running_task
-
-    def myview(request):
-        # result is of type AbortableAsyncResult
-        result = long_running_task.delay()
-
-        # abort the task after 10 seconds
-        time.sleep(10)
-        result.abort()
-
-After the `result.abort()` call, the task execution is not
-aborted immediately.  In fact, it is not guaranteed to abort at all.
-Keep checking `result.state`, or call `result.get(timeout=)` to
-have it block until the task is finished.
-
-.. note::
-
-   In order to abort tasks, there needs to be communication between the
-   producer and the consumer.  This is currently implemented through the
-   database backend.  Therefore, this class will only work with the
-   database backends.
-
-"""
-from __future__ import absolute_import
-
-from celery import Task
-from celery.result import AsyncResult
-
-__all__ = ['AbortableAsyncResult', 'AbortableTask']
-
-
-"""
-Task States
------------
-
-.. state:: ABORTED
-
-ABORTED
-~~~~~~~
-
-Task is aborted (typically by the producer) and should be
-aborted as soon as possible.
-
-"""
-ABORTED = 'ABORTED'
-
-
-class AbortableAsyncResult(AsyncResult):
-    """Represents an abortable result.
-
-    Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
-    which sets the state of the underlying Task to `'ABORTED'`.
-
-    """
-
-    def is_aborted(self):
-        """Return :const:`True` if the task is (being) aborted."""
-        return self.state == ABORTED
-
-    def abort(self):
-        """Set the state of the task to :const:`ABORTED`.
-
-        Abortable tasks monitor their state at regular intervals and
-        terminate execution if aborted.
-
-        Be aware that invoking this method does not guarantee when the
-        task will be aborted (or even if the task will be aborted at
-        all).
-
-        """
-        # TODO: store_result requires all four arguments to be set,
-        # but only status should be updated here
-        return self.backend.store_result(self.id, result=None,
-                                         status=ABORTED, traceback=None)
-
-
-class AbortableTask(Task):
-    """A celery task that serves as a base class for all :class:`Task`'s
-    that support aborting during execution.
-
-    All subclasses of :class:`AbortableTask` must call the
-    :meth:`is_aborted` method periodically and act accordingly when
-    the call evaluates to :const:`True`.
-
-    """
-    abstract = True
-
-    def AsyncResult(self, task_id):
-        """Return the accompanying AbortableAsyncResult instance."""
-        return AbortableAsyncResult(task_id, backend=self.backend)
-
-    def is_aborted(self, **kwargs):
-        """Checks against the backend whether this
-        :class:`AbortableAsyncResult` is :const:`ABORTED`.
-
-        Always return :const:`False` in case the `task_id` parameter
-        refers to a regular (non-abortable) :class:`Task`.
-
-        Be aware that invoking this method will cause a hit in the
-        backend (for example a database query), so find a good balance
-        between calling it regularly (for responsiveness), but not too
-        often (for performance).
- - """ - task_id = kwargs.get('task_id', self.request.id) - result = self.AsyncResult(task_id) - if not isinstance(result, AbortableAsyncResult): - return False - return result.is_aborted() diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py deleted file mode 100644 index 30f0a20..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.batches -====================== - -Experimental task class that buffers messages and processes them as a list. - -.. warning:: - - For this to work you have to set - :setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where - the final multiplied value is higher than ``flush_every``. - - In the future we hope to add the ability to direct batching tasks - to a channel with different QoS requirements than the task channel. - -**Simple Example** - -A click counter that flushes the buffer every 100 messages, and every -10 seconds. Does not do anything with the data, but can easily be modified -to store it in a database. - -.. code-block:: python - - # Flush after 100 messages, or 10 seconds. - @app.task(base=Batches, flush_every=100, flush_interval=10) - def count_click(requests): - from collections import Counter - count = Counter(request.kwargs['url'] for request in requests) - for url, count in count.items(): - print('>>> Clicks: {0} -> {1}'.format(url, count)) - - -Then you can ask for a click to be counted by doing:: - - >>> count_click.delay(url='http://example.com') - -**Example returning results** - -An interface to the Web of Trust API that flushes the buffer every 100 -messages, and every 10 seconds. - -.. code-block:: python - - import requests - from urlparse import urlparse - - from celery.contrib.batches import Batches - - wot_api_target = 'https://api.mywot.com/0.4/public_link_json' - - @app.task(base=Batches, flush_every=100, flush_interval=10) - def wot_api(requests): - sig = lambda url: url - reponses = wot_api_real( - (sig(*request.args, **request.kwargs) for request in requests) - ) - # use mark_as_done to manually return response data - for response, request in zip(reponses, requests): - app.backend.mark_as_done(request.id, response) - - - def wot_api_real(urls): - domains = [urlparse(url).netloc for url in urls] - response = requests.get( - wot_api_target, - params={'hosts': ('/').join(set(domains)) + '/'} - ) - return [response.json()[domain] for domain in domains] - -Using the API is done as follows:: - - >>> wot_api.delay('http://example.com') - -.. note:: - - If you don't have an ``app`` instance then use the current app proxy - instead:: - - from celery import current_app - app.backend.mark_as_done(request.id, response) - -""" -from __future__ import absolute_import - -from itertools import count - -from celery.task import Task -from celery.five import Empty, Queue -from celery.utils.log import get_logger -from celery.worker.job import Request -from celery.utils import noop - -__all__ = ['Batches'] - -logger = get_logger(__name__) - - -def consume_queue(queue): - """Iterator yielding all immediately available items in a - :class:`Queue.Queue`. - - The iterator stops as soon as the queue raises :exc:`Queue.Empty`. 
- - *Examples* - - >>> q = Queue() - >>> map(q.put, range(4)) - >>> list(consume_queue(q)) - [0, 1, 2, 3] - >>> list(consume_queue(q)) - [] - - """ - get = queue.get_nowait - while 1: - try: - yield get() - except Empty: - break - - -def apply_batches_task(task, args, loglevel, logfile): - task.push_request(loglevel=loglevel, logfile=logfile) - try: - result = task(*args) - except Exception as exc: - result = None - logger.error('Error: %r', exc, exc_info=True) - finally: - task.pop_request() - return result - - -class SimpleRequest(object): - """Pickleable request.""" - - #: task id - id = None - - #: task name - name = None - - #: positional arguments - args = () - - #: keyword arguments - kwargs = {} - - #: message delivery information. - delivery_info = None - - #: worker node name - hostname = None - - def __init__(self, id, name, args, kwargs, delivery_info, hostname): - self.id = id - self.name = name - self.args = args - self.kwargs = kwargs - self.delivery_info = delivery_info - self.hostname = hostname - - @classmethod - def from_request(cls, request): - return cls(request.id, request.name, request.args, - request.kwargs, request.delivery_info, request.hostname) - - -class Batches(Task): - abstract = True - - #: Maximum number of message in buffer. - flush_every = 10 - - #: Timeout in seconds before buffer is flushed anyway. - flush_interval = 30 - - def __init__(self): - self._buffer = Queue() - self._count = count(1) - self._tref = None - self._pool = None - - def run(self, requests): - raise NotImplementedError('must implement run(requests)') - - def Strategy(self, task, app, consumer): - self._pool = consumer.pool - hostname = consumer.hostname - eventer = consumer.event_dispatcher - Req = Request - connection_errors = consumer.connection_errors - timer = consumer.timer - put_buffer = self._buffer.put - flush_buffer = self._do_flush - - def task_message_handler(message, body, ack, reject, callbacks, **kw): - request = Req(body, on_ack=ack, app=app, hostname=hostname, - events=eventer, task=task, - connection_errors=connection_errors, - delivery_info=message.delivery_info) - put_buffer(request) - - if self._tref is None: # first request starts flush timer. - self._tref = timer.call_repeatedly( - self.flush_interval, flush_buffer, - ) - - if not next(self._count) % self.flush_every: - flush_buffer() - - return task_message_handler - - def flush(self, requests): - return self.apply_buffer(requests, ([SimpleRequest.from_request(r) - for r in requests], )) - - def _do_flush(self): - logger.debug('Batches: Wake-up to flush buffer...') - requests = None - if self._buffer.qsize(): - requests = list(consume_queue(self._buffer)) - if requests: - logger.debug('Batches: Buffer complete: %s', len(requests)) - self.flush(requests) - if not requests: - logger.debug('Batches: Canceling timer: Nothing in buffer.') - if self._tref: - self._tref.cancel() # cancel timer. 
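# The buffering logic above boils down to a simple pattern: collect
# requests, then flush when either flush_every items have accumulated
# or flush_interval seconds have passed since the first one.  A minimal,
# framework-free sketch of that pattern (threading.Timer stands in for
# the consumer's timer; all names here are illustrative):
import threading


class BufferedFlush(object):

    def __init__(self, flush_every=10, flush_interval=30.0, on_flush=print):
        self.flush_every = flush_every
        self.flush_interval = flush_interval
        self.on_flush = on_flush
        self._buf = []
        self._lock = threading.Lock()
        self._timer = None

    def put(self, item):
        with self._lock:
            self._buf.append(item)
            if self._timer is None:
                # first item starts the flush timer.
                self._timer = threading.Timer(self.flush_interval, self.flush)
                self._timer.start()
            if len(self._buf) >= self.flush_every:
                self._flush_locked()

    def flush(self):
        with self._lock:
            self._flush_locked()

    def _flush_locked(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
        items, self._buf = self._buf, []
        if items:
            self.on_flush(items)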
- self._tref = None - - def apply_buffer(self, requests, args=(), kwargs={}): - acks_late = [], [] - [acks_late[r.task.acks_late].append(r) for r in requests] - assert requests and (acks_late[True] or acks_late[False]) - - def on_accepted(pid, time_accepted): - [req.acknowledge() for req in acks_late[False]] - - def on_return(result): - [req.acknowledge() for req in acks_late[True]] - - return self._pool.apply_async( - apply_batches_task, - (self, args, 0, None), - accept_callback=on_accepted, - callback=acks_late[True] and on_return or noop, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py deleted file mode 100644 index 56aa7f4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.methods -====================== - -Task decorator that supports creating tasks out of methods. - -Examples --------- - -.. code-block:: python - - from celery.contrib.methods import task - - class X(object): - - @task() - def add(self, x, y): - return x + y - -or with any task decorator: - -.. code-block:: python - - from celery.contrib.methods import task_method - - class X(object): - - @app.task(filter=task_method) - def add(self, x, y): - return x + y - -.. note:: - - The task must use the new Task base class (:class:`celery.Task`), - and the old base class using classmethods (``celery.task.Task``, - ``celery.task.base.Task``). - - This means that you have to use the task decorator from a Celery app - instance, and not the old-API: - - .. code-block:: python - - - from celery import task # BAD - from celery.task import task # ALSO BAD - - # GOOD: - app = Celery(...) - - @app.task(filter=task_method) - def foo(self): pass - - # ALSO GOOD: - from celery import current_app - - @current_app.task(filter=task_method) - def foo(self): pass - - # ALSO GOOD: - from celery import shared_task - - @shared_task(filter=task_method) - def foo(self): pass - -Caveats -------- - -- Automatic naming won't be able to know what the class name is. - - The name will still be module_name + task_name, - so two methods with the same name in the same module will collide - so that only one task can run: - - .. code-block:: python - - class A(object): - - @task() - def add(self, x, y): - return x + y - - class B(object): - - @task() - def add(self, x, y): - return x + y - - would have to be written as: - - .. code-block:: python - - class A(object): - @task(name='A.add') - def add(self, x, y): - return x + y - - class B(object): - @task(name='B.add') - def add(self, x, y): - return x + y - -""" - -from __future__ import absolute_import - -from celery import current_app - -__all__ = ['task_method', 'task'] - - -class task_method(object): - - def __init__(self, task, *args, **kwargs): - self.task = task - - def __get__(self, obj, type=None): - if obj is None: - return self.task - task = self.task.__class__() - task.__self__ = obj - return task - - -def task(*args, **kwargs): - return current_app.task(*args, **dict(kwargs, filter=task_method)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py deleted file mode 100644 index e4a10e9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py +++ /dev/null @@ -1,365 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.contrib.migrate - ~~~~~~~~~~~~~~~~~~~~~~ - - Migration tools. 
- -""" -from __future__ import absolute_import, print_function, unicode_literals - -import socket - -from functools import partial -from itertools import cycle, islice - -from kombu import eventloop, Queue -from kombu.common import maybe_declare -from kombu.utils.encoding import ensure_bytes - -from celery.app import app_or_default -from celery.five import string, string_t -from celery.utils import worker_direct - -__all__ = ['StopFiltering', 'State', 'republish', 'migrate_task', - 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', - 'start_filter', 'move_task_by_id', 'move_by_idmap', - 'move_by_taskmap', 'move_direct', 'move_direct_by_id'] - -MOVING_PROGRESS_FMT = """\ -Moving task {state.filtered}/{state.strtotal}: \ -{body[task]}[{body[id]}]\ -""" - - -class StopFiltering(Exception): - pass - - -class State(object): - count = 0 - filtered = 0 - total_apx = 0 - - @property - def strtotal(self): - if not self.total_apx: - return '?' - return string(self.total_apx) - - def __repr__(self): - if self.filtered: - return '^{0.filtered}'.format(self) - return '{0.count}/{0.strtotal}'.format(self) - - -def republish(producer, message, exchange=None, routing_key=None, - remove_props=['application_headers', - 'content_type', - 'content_encoding', - 'headers']): - body = ensure_bytes(message.body) # use raw message body. - info, headers, props = (message.delivery_info, - message.headers, message.properties) - exchange = info['exchange'] if exchange is None else exchange - routing_key = info['routing_key'] if routing_key is None else routing_key - ctype, enc = message.content_type, message.content_encoding - # remove compression header, as this will be inserted again - # when the message is recompressed. - compression = headers.pop('compression', None) - - for key in remove_props: - props.pop(key, None) - - producer.publish(ensure_bytes(body), exchange=exchange, - routing_key=routing_key, compression=compression, - headers=headers, content_type=ctype, - content_encoding=enc, **props) - - -def migrate_task(producer, body_, message, queues=None): - info = message.delivery_info - queues = {} if queues is None else queues - republish(producer, message, - exchange=queues.get(info['exchange']), - routing_key=queues.get(info['routing_key'])) - - -def filter_callback(callback, tasks): - - def filtered(body, message): - if tasks and body['task'] not in tasks: - return - - return callback(body, message) - return filtered - - -def migrate_tasks(source, dest, migrate=migrate_task, app=None, - queues=None, **kwargs): - app = app_or_default(app) - queues = prepare_queues(queues) - producer = app.amqp.TaskProducer(dest) - migrate = partial(migrate, producer, queues=queues) - - def on_declare_queue(queue): - new_queue = queue(producer.channel) - new_queue.name = queues.get(queue.name, queue.name) - if new_queue.routing_key == queue.name: - new_queue.routing_key = queues.get(queue.name, - new_queue.routing_key) - if new_queue.exchange.name == queue.name: - new_queue.exchange.name = queues.get(queue.name, queue.name) - new_queue.declare() - - return start_filter(app, source, migrate, queues=queues, - on_declare_queue=on_declare_queue, **kwargs) - - -def _maybe_queue(app, q): - if isinstance(q, string_t): - return app.amqp.queues[q] - return q - - -def move(predicate, connection=None, exchange=None, routing_key=None, - source=None, app=None, callback=None, limit=None, transform=None, - **kwargs): - """Find tasks by filtering them and move the tasks to a new queue. 
- - :param predicate: Filter function used to decide which messages - to move. Must accept the standard signature of ``(body, message)`` - used by Kombu consumer callbacks. If the predicate wants the message - to be moved it must return either: - - 1) a tuple of ``(exchange, routing_key)``, or - - 2) a :class:`~kombu.entity.Queue` instance, or - - 3) any other true value which means the specified - ``exchange`` and ``routing_key`` arguments will be used. - - :keyword connection: Custom connection to use. - :keyword source: Optional list of source queues to use instead of the - default (which is the queues in :setting:`CELERY_QUEUES`). - This list can also contain new :class:`~kombu.entity.Queue` instances. - :keyword exchange: Default destination exchange. - :keyword routing_key: Default destination routing key. - :keyword limit: Limit number of messages to filter. - :keyword callback: Callback called after message moved, - with signature ``(state, body, message)``. - :keyword transform: Optional function to transform the return - value (destination) of the filter function. - - Also supports the same keyword arguments as :func:`start_filter`. - - To demonstrate, the :func:`move_task_by_id` operation can be implemented - like this: - - .. code-block:: python - - def is_wanted_task(body, message): - if body['id'] == wanted_id: - return Queue('foo', exchange=Exchange('foo'), - routing_key='foo') - - move(is_wanted_task) - - or with a transform: - - .. code-block:: python - - def transform(value): - if isinstance(value, string_t): - return Queue(value, Exchange(value), value) - return value - - move(is_wanted_task, transform=transform) - - The predicate may also return a tuple of ``(exchange, routing_key)`` - to specify the destination to where the task should be moved, - or a :class:`~kombu.entitiy.Queue` instance. - Any other true value means that the task will be moved to the - default exchange/routing_key. 
- - """ - app = app_or_default(app) - queues = [_maybe_queue(app, queue) for queue in source or []] or None - with app.connection_or_acquire(connection, pool=False) as conn: - producer = app.amqp.TaskProducer(conn) - state = State() - - def on_task(body, message): - ret = predicate(body, message) - if ret: - if transform: - ret = transform(ret) - if isinstance(ret, Queue): - maybe_declare(ret, conn.default_channel) - ex, rk = ret.exchange.name, ret.routing_key - else: - ex, rk = expand_dest(ret, exchange, routing_key) - republish(producer, message, - exchange=ex, routing_key=rk) - message.ack() - - state.filtered += 1 - if callback: - callback(state, body, message) - if limit and state.filtered >= limit: - raise StopFiltering() - - return start_filter(app, conn, on_task, consume_from=queues, **kwargs) - - -def expand_dest(ret, exchange, routing_key): - try: - ex, rk = ret - except (TypeError, ValueError): - ex, rk = exchange, routing_key - return ex, rk - - -def task_id_eq(task_id, body, message): - return body['id'] == task_id - - -def task_id_in(ids, body, message): - return body['id'] in ids - - -def prepare_queues(queues): - if isinstance(queues, string_t): - queues = queues.split(',') - if isinstance(queues, list): - queues = dict(tuple(islice(cycle(q.split(':')), None, 2)) - for q in queues) - if queues is None: - queues = {} - return queues - - -def start_filter(app, conn, filter, limit=None, timeout=1.0, - ack_messages=False, tasks=None, queues=None, - callback=None, forever=False, on_declare_queue=None, - consume_from=None, state=None, accept=None, **kwargs): - state = state or State() - queues = prepare_queues(queues) - consume_from = [_maybe_queue(app, q) - for q in consume_from or list(queues)] - if isinstance(tasks, string_t): - tasks = set(tasks.split(',')) - if tasks is None: - tasks = set([]) - - def update_state(body, message): - state.count += 1 - if limit and state.count >= limit: - raise StopFiltering() - - def ack_message(body, message): - message.ack() - - consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept) - - if tasks: - filter = filter_callback(filter, tasks) - update_state = filter_callback(update_state, tasks) - ack_message = filter_callback(ack_message, tasks) - - consumer.register_callback(filter) - consumer.register_callback(update_state) - if ack_messages: - consumer.register_callback(ack_message) - if callback is not None: - callback = partial(callback, state) - if tasks: - callback = filter_callback(callback, tasks) - consumer.register_callback(callback) - - # declare all queues on the new broker. - for queue in consumer.queues: - if queues and queue.name not in queues: - continue - if on_declare_queue is not None: - on_declare_queue(queue) - try: - _, mcount, _ = queue(consumer.channel).queue_declare(passive=True) - if mcount: - state.total_apx += mcount - except conn.channel_errors: - pass - - # start migrating messages. - with consumer: - try: - for _ in eventloop(conn, # pragma: no cover - timeout=timeout, ignore_timeouts=forever): - pass - except socket.timeout: - pass - except StopFiltering: - pass - return state - - -def move_task_by_id(task_id, dest, **kwargs): - """Find a task by id and move it to another queue. - - :param task_id: Id of task to move. - :param dest: Destination queue. - - Also supports the same keyword arguments as :func:`move`. 
- - """ - return move_by_idmap({task_id: dest}, **kwargs) - - -def move_by_idmap(map, **kwargs): - """Moves tasks by matching from a ``task_id: queue`` mapping, - where ``queue`` is a queue to move the task to. - - Example:: - - >>> move_by_idmap({ - ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), - ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), - ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, - ... queues=['hipri']) - - """ - def task_id_in_map(body, message): - return map.get(body['id']) - - # adding the limit means that we don't have to consume any more - # when we've found everything. - return move(task_id_in_map, limit=len(map), **kwargs) - - -def move_by_taskmap(map, **kwargs): - """Moves tasks by matching from a ``task_name: queue`` mapping, - where ``queue`` is the queue to move the task to. - - Example:: - - >>> move_by_taskmap({ - ... 'tasks.add': Queue('name'), - ... 'tasks.mul': Queue('name'), - ... }) - - """ - - def task_name_in_map(body, message): - return map.get(body['task']) # <- name of task - - return move(task_name_in_map, **kwargs) - - -def filter_status(state, body, message, **kwargs): - print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) - - -move_direct = partial(move, transform=worker_direct) -move_direct_by_id = partial(move_task_by_id, transform=worker_direct) -move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) -move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py deleted file mode 100644 index 3f218ae..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.rdb -================== - -Remote debugger for Celery tasks running in multiprocessing pool workers. -Inspired by http://snippets.dzone.com/posts/show/7248 - -**Usage** - -.. code-block:: python - - from celery.contrib import rdb - from celery import task - - @task() - def add(x, y): - result = x + y - rdb.set_trace() - return result - - -**Environment Variables** - -.. envvar:: CELERY_RDB_HOST - - Hostname to bind to. Default is '127.0.01', which means the socket - will only be accessible from the local host. - -.. envvar:: CELERY_RDB_PORT - - Base port to bind to. Default is 6899. - The debugger will try to find an available port starting from the - base port. The selected port will be logged by the worker. - -""" -from __future__ import absolute_import, print_function - -import errno -import os -import socket -import sys - -from pdb import Pdb - -from billiard import current_process - -from celery.five import range - -__all__ = ['CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'default_port', - 'Rdb', 'debugger', 'set_trace'] - -default_port = 6899 - -CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' -CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port) - -#: Holds the currently active debugger. -_current = [None] - -_frame = getattr(sys, '_getframe') - -NO_AVAILABLE_PORT = """\ -{self.ident}: Couldn't find an available port. - -Please specify one using the CELERY_RDB_PORT environment variable. -""" - -BANNER = """\ -{self.ident}: Please telnet into {self.host} {self.port}. - -Type `exit` in session to continue. - -{self.ident}: Waiting for client... -""" - -SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' 
-SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' - - -class Rdb(Pdb): - me = 'Remote Debugger' - _prev_outs = None - _sock = None - - def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT, - port_search_limit=100, port_skew=+0, out=sys.stdout): - self.active = True - self.out = out - - self._prev_handles = sys.stdin, sys.stdout - - self._sock, this_port = self.get_avail_port( - host, port, port_search_limit, port_skew, - ) - self._sock.setblocking(1) - self._sock.listen(1) - self.ident = '{0}:{1}'.format(self.me, this_port) - self.host = host - self.port = this_port - self.say(BANNER.format(self=self)) - - self._client, address = self._sock.accept() - self._client.setblocking(1) - self.remote_addr = ':'.join(str(v) for v in address) - self.say(SESSION_STARTED.format(self=self)) - self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') - Pdb.__init__(self, completekey='tab', - stdin=self._handle, stdout=self._handle) - - def get_avail_port(self, host, port, search_limit=100, skew=+0): - try: - _, skew = current_process().name.split('-') - skew = int(skew) - except ValueError: - pass - this_port = None - for i in range(search_limit): - _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - this_port = port + skew + i - try: - _sock.bind((host, this_port)) - except socket.error as exc: - if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: - continue - raise - else: - return _sock, this_port - else: - raise Exception(NO_AVAILABLE_PORT.format(self=self)) - - def say(self, m): - print(m, file=self.out) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self._close_session() - - def _close_session(self): - self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles - if self.active: - if self._handle is not None: - self._handle.close() - if self._client is not None: - self._client.close() - if self._sock is not None: - self._sock.close() - self.active = False - self.say(SESSION_ENDED.format(self=self)) - - def do_continue(self, arg): - self._close_session() - self.set_continue() - return 1 - do_c = do_cont = do_continue - - def do_quit(self, arg): - self._close_session() - self.set_quit() - return 1 - do_q = do_exit = do_quit - - def set_quit(self): - # this raises a BdbQuit exception that we are unable to catch. - sys.settrace(None) - - -def debugger(): - """Return the current debugger instance (if any), - or creates a new one.""" - rdb = _current[0] - if rdb is None or not rdb.active: - rdb = _current[0] = Rdb() - return rdb - - -def set_trace(frame=None): - """Set breakpoint at current location, or a specified frame""" - if frame is None: - frame = _frame().f_back - return debugger().set_trace(frame) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py deleted file mode 100644 index 2e57431..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.contrib.sphinx -===================== - -Sphinx documentation plugin - -**Usage** - -Add the extension to your :file:`docs/conf.py` configuration module: - -.. code-block:: python - - extensions = (..., - 'celery.contrib.sphinx') - -If you would like to change the prefix for tasks in reference documentation -then you can change the ``celery_task_prefix`` configuration value: - -.. 
code-block:: python - - celery_task_prefix = '(task)' # < default - - -With the extension installed `autodoc` will automatically find -task decorated objects and generate the correct (as well as -add a ``(task)`` prefix), and you can also refer to the tasks -using `:task:proj.tasks.add` syntax. - -Use ``.. autotask::`` to manually document a task. - -""" -from __future__ import absolute_import - -try: - from inspect import formatargspec, getfullargspec as getargspec -except ImportError: # Py2 - from inspect import formatargspec, getargspec # noqa - -from sphinx.domains.python import PyModulelevel -from sphinx.ext.autodoc import FunctionDocumenter - -from celery.app.task import BaseTask - - -class TaskDocumenter(FunctionDocumenter): - objtype = 'task' - member_order = 11 - - @classmethod - def can_document_member(cls, member, membername, isattr, parent): - return isinstance(member, BaseTask) and getattr(member, '__wrapped__') - - def format_args(self): - wrapped = getattr(self.object, '__wrapped__') - if wrapped is not None: - argspec = getargspec(wrapped) - fmt = formatargspec(*argspec) - fmt = fmt.replace('\\', '\\\\') - return fmt - return '' - - def document_members(self, all_members=False): - pass - - -class TaskDirective(PyModulelevel): - - def get_signature_prefix(self, sig): - return self.env.config.celery_task_prefix - - -def setup(app): - app.add_autodocumenter(TaskDocumenter) - app.domains['py'].directives['task'] = TaskDirective - app.add_config_value('celery_task_prefix', '(task)', True) diff --git a/thesisenv/lib/python3.6/site-packages/celery/datastructures.py b/thesisenv/lib/python3.6/site-packages/celery/datastructures.py deleted file mode 100644 index 32a1d54..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/datastructures.py +++ /dev/null @@ -1,671 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.datastructures - ~~~~~~~~~~~~~~~~~~~~~ - - Custom types and data structures. 
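[Editor's note: returning to the Sphinx plugin removed above, a minimal ``docs/conf.py`` fragment wiring it up; the extension list beyond the plugin itself is illustrative:

.. code-block:: python

    # docs/conf.py
    extensions = [
        'sphinx.ext.autodoc',
        'celery.contrib.sphinx',
    ]

    # Optional: change the prefix shown before task names in the docs.
    celery_task_prefix = '(task)'  # this is the default
]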
- -""" -from __future__ import absolute_import, print_function, unicode_literals - -import sys -import time - -from collections import defaultdict, Mapping, MutableMapping, MutableSet -from heapq import heapify, heappush, heappop -from functools import partial -from itertools import chain - -from billiard.einfo import ExceptionInfo # noqa -from kombu.utils.encoding import safe_str -from kombu.utils.limits import TokenBucket # noqa - -from celery.five import items -from celery.utils.functional import LRUCache, first, uniq # noqa - -try: - from django.utils.functional import LazyObject, LazySettings -except ImportError: - class LazyObject(object): # noqa - pass - LazySettings = LazyObject # noqa - -DOT_HEAD = """ -{IN}{type} {id} {{ -{INp}graph [{attrs}] -""" -DOT_ATTR = '{name}={value}' -DOT_NODE = '{INp}"{0}" [{attrs}]' -DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' -DOT_ATTRSEP = ', ' -DOT_DIRS = {'graph': '--', 'digraph': '->'} -DOT_TAIL = '{IN}}}' - -__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph', - 'AttributeDictMixin', 'AttributeDict', 'DictAttribute', - 'ConfigurationView', 'LimitedSet'] - - -def force_mapping(m): - if isinstance(m, (LazyObject, LazySettings)): - m = m._wrapped - return DictAttribute(m) if not isinstance(m, Mapping) else m - - -class GraphFormatter(object): - _attr = DOT_ATTR.strip() - _node = DOT_NODE.strip() - _edge = DOT_EDGE.strip() - _head = DOT_HEAD.strip() - _tail = DOT_TAIL.strip() - _attrsep = DOT_ATTRSEP - _dirs = dict(DOT_DIRS) - - scheme = { - 'shape': 'box', - 'arrowhead': 'vee', - 'style': 'filled', - 'fontname': 'HelveticaNeue', - } - edge_scheme = { - 'color': 'darkseagreen4', - 'arrowcolor': 'black', - 'arrowsize': 0.7, - } - node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} - term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} - graph_scheme = {'bgcolor': 'mintcream'} - - def __init__(self, root=None, type=None, id=None, - indent=0, inw=' ' * 4, **scheme): - self.id = id or 'dependencies' - self.root = root - self.type = type or 'digraph' - self.direction = self._dirs[self.type] - self.IN = inw * (indent or 0) - self.INp = self.IN + inw - self.scheme = dict(self.scheme, **scheme) - self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) - - def attr(self, name, value): - value = '"{0}"'.format(value) - return self.FMT(self._attr, name=name, value=value) - - def attrs(self, d, scheme=None): - d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) - return self._attrsep.join( - safe_str(self.attr(k, v)) for k, v in items(d) - ) - - def head(self, **attrs): - return self.FMT( - self._head, id=self.id, type=self.type, - attrs=self.attrs(attrs, self.graph_scheme), - ) - - def tail(self): - return self.FMT(self._tail) - - def label(self, obj): - return obj - - def node(self, obj, **attrs): - return self.draw_node(obj, self.node_scheme, attrs) - - def terminal_node(self, obj, **attrs): - return self.draw_node(obj, self.term_scheme, attrs) - - def edge(self, a, b, **attrs): - return self.draw_edge(a, b, **attrs) - - def _enc(self, s): - return s.encode('utf-8', 'ignore') - - def FMT(self, fmt, *args, **kwargs): - return self._enc(fmt.format( - *args, **dict(kwargs, IN=self.IN, INp=self.INp) - )) - - def draw_edge(self, a, b, scheme=None, attrs=None): - return self.FMT( - self._edge, self.label(a), self.label(b), - dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), - ) - - def draw_node(self, obj, scheme=None, attrs=None): - return self.FMT( - self._node, self.label(obj), 
attrs=self.attrs(attrs, scheme), - ) - - -class CycleError(Exception): - """A cycle was detected in an acyclic graph.""" - - -class DependencyGraph(object): - """A directed acyclic graph of objects and their dependencies. - - Supports a robust topological sort - to detect the order in which they must be handled. - - Takes an optional iterator of ``(obj, dependencies)`` - tuples to build the graph from. - - .. warning:: - - Does not support cycle detection. - - """ - - def __init__(self, it=None, formatter=None): - self.formatter = formatter or GraphFormatter() - self.adjacent = {} - if it is not None: - self.update(it) - - def add_arc(self, obj): - """Add an object to the graph.""" - self.adjacent.setdefault(obj, []) - - def add_edge(self, A, B): - """Add an edge from object ``A`` to object ``B`` - (``A`` depends on ``B``).""" - self[A].append(B) - - def connect(self, graph): - """Add nodes from another graph.""" - self.adjacent.update(graph.adjacent) - - def topsort(self): - """Sort the graph topologically. - - :returns: a list of objects in the order - in which they must be handled. - - """ - graph = DependencyGraph() - components = self._tarjan72() - - NC = dict((node, component) - for component in components - for node in component) - for component in components: - graph.add_arc(component) - for node in self: - node_c = NC[node] - for successor in self[node]: - successor_c = NC[successor] - if node_c != successor_c: - graph.add_edge(node_c, successor_c) - return [t[0] for t in graph._khan62()] - - def valency_of(self, obj): - """Return the valency (degree) of a vertex in the graph.""" - try: - l = [len(self[obj])] - except KeyError: - return 0 - for node in self[obj]: - l.append(self.valency_of(node)) - return sum(l) - - def update(self, it): - """Update the graph with data from a list - of ``(obj, dependencies)`` tuples.""" - tups = list(it) - for obj, _ in tups: - self.add_arc(obj) - for obj, deps in tups: - for dep in deps: - self.add_edge(obj, dep) - - def edges(self): - """Return generator that yields for all edges in the graph.""" - return (obj for obj, adj in items(self) if adj) - - def _khan62(self): - """Khans simple topological sort algorithm from '62 - - See http://en.wikipedia.org/wiki/Topological_sorting - - """ - count = defaultdict(lambda: 0) - result = [] - - for node in self: - for successor in self[node]: - count[successor] += 1 - ready = [node for node in self if not count[node]] - - while ready: - node = ready.pop() - result.append(node) - - for successor in self[node]: - count[successor] -= 1 - if count[successor] == 0: - ready.append(successor) - result.reverse() - return result - - def _tarjan72(self): - """Tarjan's algorithm to find strongly connected components. - - See http://bit.ly/vIMv3h. - - """ - result, stack, low = [], [], {} - - def visit(node): - if node in low: - return - num = len(low) - low[node] = num - stack_pos = len(stack) - stack.append(node) - - for successor in self[node]: - visit(successor) - low[node] = min(low[node], low[successor]) - - if num == low[node]: - component = tuple(stack[stack_pos:]) - stack[stack_pos:] = [] - result.append(component) - for item in component: - low[item] = len(self) - - for node in self: - visit(node) - - return result - - def to_dot(self, fh, formatter=None): - """Convert the graph to DOT format. - - :param fh: A file, or a file-like object to write the graph to. 
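[Editor's note: a sketch of the ``DependencyGraph`` API shown above, building a graph from ``(obj, dependencies)`` tuples and sorting it topologically; the node names are made up:

.. code-block:: python

    from celery.datastructures import DependencyGraph

    # 'app' depends on 'db' and 'cache'; 'db' depends on 'config'.
    graph = DependencyGraph([
        ('app', ['db', 'cache']),
        ('db', ['config']),
        ('cache', []),
        ('config', []),
    ])

    # Dependencies are ordered before the objects that need them,
    # e.g. ['config', 'db', 'cache', 'app'].
    print(graph.topsort())
]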
- - """ - seen = set() - draw = formatter or self.formatter - P = partial(print, file=fh) - - def if_not_seen(fun, obj): - if draw.label(obj) not in seen: - P(fun(obj)) - seen.add(draw.label(obj)) - - P(draw.head()) - for obj, adjacent in items(self): - if not adjacent: - if_not_seen(draw.terminal_node, obj) - for req in adjacent: - if_not_seen(draw.node, obj) - P(draw.edge(obj, req)) - P(draw.tail()) - - def format(self, obj): - return self.formatter(obj) if self.formatter else obj - - def __iter__(self): - return iter(self.adjacent) - - def __getitem__(self, node): - return self.adjacent[node] - - def __len__(self): - return len(self.adjacent) - - def __contains__(self, obj): - return obj in self.adjacent - - def _iterate_items(self): - return items(self.adjacent) - items = iteritems = _iterate_items - - def __repr__(self): - return '\n'.join(self.repr_node(N) for N in self) - - def repr_node(self, obj, level=1, fmt='{0}({1})'): - output = [fmt.format(obj, self.valency_of(obj))] - if obj in self: - for other in self[obj]: - d = fmt.format(other, self.valency_of(other)) - output.append(' ' * level + d) - output.extend(self.repr_node(other, level + 1).split('\n')[1:]) - return '\n'.join(output) - - -class AttributeDictMixin(object): - """Augment classes with a Mapping interface by adding attribute access. - - I.e. `d.key -> d[key]`. - - """ - - def __getattr__(self, k): - """`d.key -> d[key]`""" - try: - return self[k] - except KeyError: - raise AttributeError( - '{0!r} object has no attribute {1!r}'.format( - type(self).__name__, k)) - - def __setattr__(self, key, value): - """`d[key] = value -> d.key = value`""" - self[key] = value - - -class AttributeDict(dict, AttributeDictMixin): - """Dict subclass with attribute access.""" - pass - - -class DictAttribute(object): - """Dict interface to attributes. - - `obj[k] -> obj.k` - `obj[k] = val -> obj.k = val` - - """ - obj = None - - def __init__(self, obj): - object.__setattr__(self, 'obj', obj) - - def __getattr__(self, key): - return getattr(self.obj, key) - - def __setattr__(self, key, value): - return setattr(self.obj, key, value) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def setdefault(self, key, default): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def __getitem__(self, key): - try: - return getattr(self.obj, key) - except AttributeError: - raise KeyError(key) - - def __setitem__(self, key, value): - setattr(self.obj, key, value) - - def __contains__(self, key): - return hasattr(self.obj, key) - - def _iterate_keys(self): - return iter(dir(self.obj)) - iterkeys = _iterate_keys - - def __iter__(self): - return self._iterate_keys() - - def _iterate_items(self): - for key in self._iterate_keys(): - yield key, getattr(self.obj, key) - iteritems = _iterate_items - - def _iterate_values(self): - for key in self._iterate_keys(): - yield getattr(self.obj, key) - itervalues = _iterate_values - - if sys.version_info[0] == 3: # pragma: no cover - items = _iterate_items - keys = _iterate_keys - values = _iterate_values - else: - - def keys(self): - return list(self) - - def items(self): - return list(self._iterate_items()) - - def values(self): - return list(self._iterate_values()) -MutableMapping.register(DictAttribute) - - -class ConfigurationView(AttributeDictMixin): - """A view over an applications configuration dicts. - - Custom (but older) version of :class:`collections.ChainMap`. 
- - If the key does not exist in ``changes``, the ``defaults`` dicts - are consulted. - - :param changes: Dict containing changes to the configuration. - :param defaults: List of dicts containing the default configuration. - - """ - changes = None - defaults = None - _order = None - - def __init__(self, changes, defaults): - self.__dict__.update(changes=changes, defaults=defaults, - _order=[changes] + defaults) - - def add_defaults(self, d): - d = force_mapping(d) - self.defaults.insert(0, d) - self._order.insert(1, d) - - def __getitem__(self, key): - for d in self._order: - try: - return d[key] - except KeyError: - pass - raise KeyError(key) - - def __setitem__(self, key, value): - self.changes[key] = value - - def first(self, *keys): - return first(None, (self.get(key) for key in keys)) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def clear(self): - """Remove all changes, but keep defaults.""" - self.changes.clear() - - def setdefault(self, key, default): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def update(self, *args, **kwargs): - return self.changes.update(*args, **kwargs) - - def __contains__(self, key): - return any(key in m for m in self._order) - - def __bool__(self): - return any(self._order) - __nonzero__ = __bool__ # Py2 - - def __repr__(self): - return repr(dict(items(self))) - - def __iter__(self): - return self._iterate_keys() - - def __len__(self): - # The logic for iterating keys includes uniq(), - # so to be safe we count by explicitly iterating - return len(set().union(*self._order)) - - def _iter(self, op): - # defaults must be first in the stream, so values in - # changes takes precedence. - return chain(*[op(d) for d in reversed(self._order)]) - - def _iterate_keys(self): - return uniq(self._iter(lambda d: d)) - iterkeys = _iterate_keys - - def _iterate_items(self): - return ((key, self[key]) for key in self) - iteritems = _iterate_items - - def _iterate_values(self): - return (self[key] for key in self) - itervalues = _iterate_values - - if sys.version_info[0] == 3: # pragma: no cover - keys = _iterate_keys - items = _iterate_items - values = _iterate_values - - else: # noqa - def keys(self): - return list(self._iterate_keys()) - - def items(self): - return list(self._iterate_items()) - - def values(self): - return list(self._iterate_values()) - -MutableMapping.register(ConfigurationView) - - -class LimitedSet(object): - """Kind-of Set with limitations. - - Good for when you need to test for membership (`a in set`), - but the set should not grow unbounded. - - :keyword maxlen: Maximum number of members before we start - evicting expired members. - :keyword expires: Time in seconds, before a membership expires. - - """ - - def __init__(self, maxlen=None, expires=None, data=None, heap=None): - # heap is ignored - self.maxlen = maxlen - self.expires = expires - self._data = {} if data is None else data - self._heap = [] - - # make shortcuts - self.__len__ = self._heap.__len__ - self.__contains__ = self._data.__contains__ - - self._refresh_heap() - - def _refresh_heap(self): - self._heap[:] = [(t, key) for key, t in items(self._data)] - heapify(self._heap) - - def add(self, key, now=time.time, heappush=heappush): - """Add a new member.""" - # offset is there to modify the length of the list, - # this way we can expire an item before inserting the value, - # and it will end up in the correct order. 
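[Editor's note: a sketch of the lookup order described above: writes always land in ``changes``, while reads fall back through the ``defaults`` list; the keys are illustrative:

.. code-block:: python

    from celery.datastructures import ConfigurationView

    defaults = {'TIMEZONE': 'UTC', 'RESULT_BACKEND': None}
    changes = {'RESULT_BACKEND': 'redis://'}
    view = ConfigurationView(changes, [defaults])

    print(view['RESULT_BACKEND'])  # 'redis://' -- changes take precedence
    print(view['TIMEZONE'])        # 'UTC'      -- falls back to defaults

    view['TIMEZONE'] = 'Europe/Berlin'  # stored in `changes` only
    view.clear()                        # drops changes, keeps defaults
]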
- self.purge(1, offset=1) - inserted = now() - self._data[key] = inserted - heappush(self._heap, (inserted, key)) - - def clear(self): - """Remove all members""" - self._data.clear() - self._heap[:] = [] - - def discard(self, value): - """Remove membership by finding value.""" - try: - itime = self._data[value] - except KeyError: - return - try: - self._heap.remove((itime, value)) - except ValueError: - pass - self._data.pop(value, None) - pop_value = discard # XXX compat - - def purge(self, limit=None, offset=0, now=time.time): - """Purge expired items.""" - H, maxlen = self._heap, self.maxlen - if not maxlen: - return - - # If the data/heap gets corrupted and limit is None - # this will go into an infinite loop, so limit must - # have a value to guard the loop. - limit = len(self) + offset if limit is None else limit - - i = 0 - while len(self) + offset > maxlen: - if i >= limit: - break - try: - item = heappop(H) - except IndexError: - break - if self.expires: - if now() < item[0] + self.expires: - heappush(H, item) - break - try: - self._data.pop(item[1]) - except KeyError: # out of sync with heap - pass - i += 1 - - def update(self, other): - if isinstance(other, LimitedSet): - self._data.update(other._data) - self._refresh_heap() - else: - for obj in other: - self.add(obj) - - def as_dict(self): - return self._data - - def __eq__(self, other): - return self._heap == other._heap - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return 'LimitedSet({0})'.format(len(self)) - - def __iter__(self): - return (item[1] for item in self._heap) - - def __len__(self): - return len(self._heap) - - def __contains__(self, key): - return key in self._data - - def __reduce__(self): - return self.__class__, (self.maxlen, self.expires, self._data) -MutableSet.register(LimitedSet) diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py deleted file mode 100644 index 65809cf..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py +++ /dev/null @@ -1,408 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events - ~~~~~~~~~~~~~ - - Events is a stream of messages sent for certain actions occurring - in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT` - is enabled), used for monitoring purposes. - -""" -from __future__ import absolute_import - -import os -import time -import threading -import warnings - -from collections import deque -from contextlib import contextmanager -from copy import copy -from operator import itemgetter - -from kombu import Exchange, Queue, Producer -from kombu.connection import maybe_channel -from kombu.mixins import ConsumerMixin -from kombu.utils import cached_property - -from celery.app import app_or_default -from celery.utils import anon_nodename, uuid -from celery.utils.functional import dictfilter -from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms - -__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver'] - -event_exchange = Exchange('celeryev', type='topic') - -_TZGETTER = itemgetter('utcoffset', 'timestamp') - -W_YAJL = """ -anyjson is currently using the yajl library. -This json implementation is broken, it severely truncates floats -so timestamps will not work. - -Please uninstall yajl or force anyjson to use a different library. 
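[Editor's note: a sketch of the eviction behaviour with only ``maxlen`` set; note that when ``expires`` is also given, members are only evicted once they have actually expired:

.. code-block:: python

    from celery.datastructures import LimitedSet

    seen = LimitedSet(maxlen=10)
    for i in range(20):
        seen.add('task-%d' % i)

    print(len(seen))          # 10    -- bounded by maxlen
    print('task-19' in seen)  # True  -- newest members are kept
    print('task-0' in seen)   # False -- oldest were evicted first
]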
-""" - -CLIENT_CLOCK_SKEW = -1 - - -def get_exchange(conn): - ex = copy(event_exchange) - if conn.transport.driver_type == 'redis': - # quick hack for Issue #436 - ex.type = 'fanout' - return ex - - -def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields): - """Create an event. - - An event is a dictionary, the only required field is ``type``. - A ``timestamp`` field will be set to the current time if not provided. - - """ - event = __dict__(_fields, **fields) if _fields else fields - if 'timestamp' not in event: - event.update(timestamp=__now__(), type=type) - else: - event['type'] = type - return event - - -def group_from(type): - """Get the group part of an event type name. - - E.g.:: - - >>> group_from('task-sent') - 'task' - - >>> group_from('custom-my-event') - 'custom' - - """ - return type.split('-', 1)[0] - - -class EventDispatcher(object): - """Dispatches event messages. - - :param connection: Connection to the broker. - - :keyword hostname: Hostname to identify ourselves as, - by default uses the hostname returned by - :func:`~celery.utils.anon_nodename`. - - :keyword groups: List of groups to send events for. :meth:`send` will - ignore send requests to groups not in this list. - If this is :const:`None`, all events will be sent. Example groups - include ``"task"`` and ``"worker"``. - - :keyword enabled: Set to :const:`False` to not actually publish any events, - making :meth:`send` a noop operation. - - :keyword channel: Can be used instead of `connection` to specify - an exact channel to use when sending events. - - :keyword buffer_while_offline: If enabled events will be buffered - while the connection is down. :meth:`flush` must be called - as soon as the connection is re-established. - - You need to :meth:`close` this after use. - - """ - DISABLED_TRANSPORTS = set(['sql']) - - app = None - - # set of callbacks to be called when :meth:`enabled`. - on_enabled = None - - # set of callbacks to be called when :meth:`disabled`. 
- on_disabled = None - - def __init__(self, connection=None, hostname=None, enabled=True, - channel=None, buffer_while_offline=True, app=None, - serializer=None, groups=None): - self.app = app_or_default(app or self.app) - self.connection = connection - self.channel = channel - self.hostname = hostname or anon_nodename() - self.buffer_while_offline = buffer_while_offline - self.mutex = threading.Lock() - self.producer = None - self._outbound_buffer = deque() - self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER - self.on_enabled = set() - self.on_disabled = set() - self.groups = set(groups or []) - self.tzoffset = [-time.timezone, -time.altzone] - self.clock = self.app.clock - if not connection and channel: - self.connection = channel.connection.client - self.enabled = enabled - conninfo = self.connection or self.app.connection() - self.exchange = get_exchange(conninfo) - if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS: - self.enabled = False - if self.enabled: - self.enable() - self.headers = {'hostname': self.hostname} - self.pid = os.getpid() - self.warn_if_yajl() - - def warn_if_yajl(self): - import anyjson - if anyjson.implementation.name == 'yajl': - warnings.warn(UserWarning(W_YAJL)) - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - - def enable(self): - self.producer = Producer(self.channel or self.connection, - exchange=self.exchange, - serializer=self.serializer) - self.enabled = True - for callback in self.on_enabled: - callback() - - def disable(self): - if self.enabled: - self.enabled = False - self.close() - for callback in self.on_disabled: - callback() - - def publish(self, type, fields, producer, retry=False, - retry_policy=None, blind=False, utcoffset=utcoffset, - Event=Event): - """Publish event using a custom :class:`~kombu.Producer` - instance. - - :param type: Event type name, with group separated by dash (`-`). - :param fields: Dictionary of event fields, must be json serializable. - :param producer: :class:`~kombu.Producer` instance to use, - only the ``publish`` method will be called. - :keyword retry: Retry in the event of connection failure. - :keyword retry_policy: Dict of custom retry policy, see - :meth:`~kombu.Connection.ensure`. - :keyword blind: Don't set logical clock value (also do not forward - the internal logical clock). - :keyword Event: Event type used to create event, - defaults to :func:`Event`. - :keyword utcoffset: Function returning the current utcoffset in hours. - - """ - - with self.mutex: - clock = None if blind else self.clock.forward() - event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), - pid=self.pid, clock=clock, **fields) - exchange = self.exchange - producer.publish( - event, - routing_key=type.replace('-', '.'), - exchange=exchange.name, - retry=retry, - retry_policy=retry_policy, - declare=[exchange], - serializer=self.serializer, - headers=self.headers, - ) - - def send(self, type, blind=False, **fields): - """Send event. - - :param type: Event type name, with group separated by dash (`-`). - :keyword retry: Retry in the event of connection failure. - :keyword retry_policy: Dict of custom retry policy, see - :meth:`~kombu.Connection.ensure`. - :keyword blind: Don't set logical clock value (also do not forward - the internal logical clock). - :keyword Event: Event type used to create event, - defaults to :func:`Event`. - :keyword utcoffset: Function returning the current utcoffset in hours. 
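[Editor's note: a sketch of sending such a custom event from application code through the app's default dispatcher; the broker URL is a placeholder:

.. code-block:: python

    from celery import Celery

    app = Celery('example', broker='amqp://')  # placeholder broker URL

    with app.events.default_dispatcher() as dispatcher:
        # 'custom-my-event' is in the 'custom' group, so receivers
        # configured with groups=['task'] would ignore it.
        dispatcher.send('custom-my-event', payload={'answer': 42})
]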
- :keyword \*\*fields: Event fields, must be json serializable. - - """ - if self.enabled: - groups = self.groups - if groups and group_from(type) not in groups: - return - try: - self.publish(type, fields, self.producer, blind) - except Exception as exc: - if not self.buffer_while_offline: - raise - self._outbound_buffer.append((type, fields, exc)) - - def flush(self): - """Flushes the outbound buffer.""" - while self._outbound_buffer: - try: - type, fields, _ = self._outbound_buffer.popleft() - except IndexError: - return - self.send(type, **fields) - - def extend_buffer(self, other): - """Copies the outbound buffer of another instance.""" - self._outbound_buffer.extend(other._outbound_buffer) - - def close(self): - """Close the event dispatcher.""" - self.mutex.locked() and self.mutex.release() - self.producer = None - - def _get_publisher(self): - return self.producer - - def _set_publisher(self, producer): - self.producer = producer - publisher = property(_get_publisher, _set_publisher) # XXX compat - - -class EventReceiver(ConsumerMixin): - """Capture events. - - :param connection: Connection to the broker. - :keyword handlers: Event handlers. - - :attr:`handlers` is a dict of event types and their handlers, - the special handler `"*"` captures all events that doesn't have a - handler. - - """ - app = None - - def __init__(self, channel, handlers=None, routing_key='#', - node_id=None, app=None, queue_prefix='celeryev', - accept=None): - self.app = app_or_default(app or self.app) - self.channel = maybe_channel(channel) - self.handlers = {} if handlers is None else handlers - self.routing_key = routing_key - self.node_id = node_id or uuid() - self.queue_prefix = queue_prefix - self.exchange = get_exchange(self.connection or self.app.connection()) - self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), - exchange=self.exchange, - routing_key=self.routing_key, - auto_delete=True, - durable=False, - queue_arguments=self._get_queue_arguments()) - self.clock = self.app.clock - self.adjust_clock = self.clock.adjust - self.forward_clock = self.clock.forward - if accept is None: - accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json']) - self.accept = accept - - def _get_queue_arguments(self): - conf = self.app.conf - return dictfilter({ - 'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL), - 'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES), - }) - - def process(self, type, event): - """Process the received event by dispatching it to the appropriate - handler.""" - handler = self.handlers.get(type) or self.handlers.get('*') - handler and handler(event) - - def get_consumers(self, Consumer, channel): - return [Consumer(queues=[self.queue], - callbacks=[self._receive], no_ack=True, - accept=self.accept)] - - def on_consume_ready(self, connection, channel, consumers, - wakeup=True, **kwargs): - if wakeup: - self.wakeup_workers(channel=channel) - - def itercapture(self, limit=None, timeout=None, wakeup=True): - return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) - - def capture(self, limit=None, timeout=None, wakeup=True): - """Open up a consumer capturing events. - - This has to run in the main process, and it will never stop - unless :attr:`EventDispatcher.should_stop` is set to True, or - forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. 
- - """ - return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup)) - - def wakeup_workers(self, channel=None): - self.app.control.broadcast('heartbeat', - connection=self.connection, - channel=channel) - - def event_from_message(self, body, localize=True, - now=time.time, tzfields=_TZGETTER, - adjust_timestamp=adjust_timestamp, - CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW): - type = body['type'] - if type == 'task-sent': - # clients never sync so cannot use their clock value - _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW - self.adjust_clock(_c) - else: - try: - clock = body['clock'] - except KeyError: - body['clock'] = self.forward_clock() - else: - self.adjust_clock(clock) - - if localize: - try: - offset, timestamp = tzfields(body) - except KeyError: - pass - else: - body['timestamp'] = adjust_timestamp(timestamp, offset) - body['local_received'] = now() - return type, body - - def _receive(self, body, message): - self.process(*self.event_from_message(body)) - - @property - def connection(self): - return self.channel.connection.client if self.channel else None - - -class Events(object): - - def __init__(self, app=None): - self.app = app - - @cached_property - def Receiver(self): - return self.app.subclass_with_self(EventReceiver, - reverse='events.Receiver') - - @cached_property - def Dispatcher(self): - return self.app.subclass_with_self(EventDispatcher, - reverse='events.Dispatcher') - - @cached_property - def State(self): - return self.app.subclass_with_self('celery.events.state:State', - reverse='events.State') - - @contextmanager - def default_dispatcher(self, hostname=None, enabled=True, - buffer_while_offline=False): - with self.app.amqp.producer_pool.acquire(block=True) as prod: - with self.Dispatcher(prod.connection, hostname, enabled, - prod.channel, buffer_while_offline) as d: - yield d diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py b/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py deleted file mode 100644 index 775f6a0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py +++ /dev/null @@ -1,544 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.cursesmon - ~~~~~~~~~~~~~~~~~~~~~~~ - - Graphical monitor of Celery events using curses. - -""" -from __future__ import absolute_import, print_function - -import curses -import sys -import threading - -from datetime import datetime -from itertools import count -from textwrap import wrap -from time import time -from math import ceil - -from celery import VERSION_BANNER -from celery import states -from celery.app import app_or_default -from celery.five import items, values -from celery.utils.text import abbr, abbrtask - -__all__ = ['CursesMonitor', 'evtop'] - -BORDER_SPACING = 4 -LEFT_BORDER_OFFSET = 3 -UUID_WIDTH = 36 -STATE_WIDTH = 8 -TIMESTAMP_WIDTH = 8 -MIN_WORKER_WIDTH = 15 -MIN_TASK_WIDTH = 16 - -# this module is considered experimental -# we don't care about coverage. 
- -STATUS_SCREEN = """\ -events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} -""" - - -class CursesMonitor(object): # pragma: no cover - keymap = {} - win = None - screen_width = None - screen_delay = 10 - selected_task = None - selected_position = 0 - selected_str = 'Selected: ' - foreground = curses.COLOR_BLACK - background = curses.COLOR_WHITE - online_str = 'Workers online: ' - help_title = 'Keys: ' - help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') - greet = 'celery events {0}'.format(VERSION_BANNER) - info_str = 'Info: ' - - def __init__(self, state, app, keymap=None): - self.app = app - self.keymap = keymap or self.keymap - self.state = state - default_keymap = {'J': self.move_selection_down, - 'K': self.move_selection_up, - 'C': self.revoke_selection, - 'T': self.selection_traceback, - 'R': self.selection_result, - 'I': self.selection_info, - 'L': self.selection_rate_limit} - self.keymap = dict(default_keymap, **self.keymap) - self.lock = threading.RLock() - - def format_row(self, uuid, task, worker, timestamp, state): - mx = self.display_width - - # include spacing - detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH - uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH - - if uuid_space < UUID_WIDTH: - uuid_width = uuid_space - else: - uuid_width = UUID_WIDTH - - detail_width = detail_width - uuid_width - 1 - task_width = int(ceil(detail_width / 2.0)) - worker_width = detail_width - task_width - 1 - - uuid = abbr(uuid, uuid_width).ljust(uuid_width) - worker = abbr(worker, worker_width).ljust(worker_width) - task = abbrtask(task, task_width).ljust(task_width) - state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) - timestamp = timestamp.ljust(TIMESTAMP_WIDTH) - - row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task, - timestamp, state) - if self.screen_width is None: - self.screen_width = len(row[:mx]) - return row[:mx] - - @property - def screen_width(self): - _, mx = self.win.getmaxyx() - return mx - - @property - def screen_height(self): - my, _ = self.win.getmaxyx() - return my - - @property - def display_width(self): - _, mx = self.win.getmaxyx() - return mx - BORDER_SPACING - - @property - def display_height(self): - my, _ = self.win.getmaxyx() - return my - 10 - - @property - def limit(self): - return self.display_height - - def find_position(self): - if not self.tasks: - return 0 - for i, e in enumerate(self.tasks): - if self.selected_task == e[0]: - return i - return 0 - - def move_selection_up(self): - self.move_selection(-1) - - def move_selection_down(self): - self.move_selection(1) - - def move_selection(self, direction=1): - if not self.tasks: - return - pos = self.find_position() - try: - self.selected_task = self.tasks[pos + direction][0] - except IndexError: - self.selected_task = self.tasks[0][0] - - keyalias = {curses.KEY_DOWN: 'J', - curses.KEY_UP: 'K', - curses.KEY_ENTER: 'I'} - - def handle_keypress(self): - try: - key = self.win.getkey().upper() - except: - return - key = self.keyalias.get(key) or key - handler = self.keymap.get(key) - if handler is not None: - handler() - - def alert(self, callback, title=None): - self.win.erase() - my, mx = self.win.getmaxyx() - y = blank_line = count(2) - if title: - self.win.addstr(next(y), 3, title, - curses.A_BOLD | curses.A_UNDERLINE) - next(blank_line) - callback(my, mx, next(y)) - self.win.addstr(my - 1, 0, 'Press any key to continue...', - curses.A_BOLD) - self.win.refresh() - while 1: - try: - return self.win.getkey().upper() - except: - pass - - def 
selection_rate_limit(self): - if not self.selected_task: - return curses.beep() - task = self.state.tasks[self.selected_task] - if not task.name: - return curses.beep() - - my, mx = self.win.getmaxyx() - r = 'New rate limit: ' - self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE) - self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r))) - rlimit = self.readline(my - 2, 3 + len(r)) - - if rlimit: - reply = self.app.control.rate_limit(task.name, - rlimit.strip(), reply=True) - self.alert_remote_control_reply(reply) - - def alert_remote_control_reply(self, reply): - - def callback(my, mx, xs): - y = count(xs) - if not reply: - self.win.addstr( - next(y), 3, 'No replies received in 1s deadline.', - curses.A_BOLD + curses.color_pair(2), - ) - return - - for subreply in reply: - curline = next(y) - - host, response = next(items(subreply)) - host = '{0}: '.format(host) - self.win.addstr(curline, 3, host, curses.A_BOLD) - attr = curses.A_NORMAL - text = '' - if 'error' in response: - text = response['error'] - attr |= curses.color_pair(2) - elif 'ok' in response: - text = response['ok'] - attr |= curses.color_pair(3) - self.win.addstr(curline, 3 + len(host), text, attr) - - return self.alert(callback, 'Remote Control Command Replies') - - def readline(self, x, y): - buffer = str() - curses.echo() - try: - i = 0 - while 1: - ch = self.win.getch(x, y + i) - if ch != -1: - if ch in (10, curses.KEY_ENTER): # enter - break - if ch in (27, ): - buffer = str() - break - buffer += chr(ch) - i += 1 - finally: - curses.noecho() - return buffer - - def revoke_selection(self): - if not self.selected_task: - return curses.beep() - reply = self.app.control.revoke(self.selected_task, reply=True) - self.alert_remote_control_reply(reply) - - def selection_info(self): - if not self.selected_task: - return - - def alert_callback(mx, my, xs): - my, mx = self.win.getmaxyx() - y = count(xs) - task = self.state.tasks[self.selected_task] - info = task.info(extra=['state']) - infoitems = [ - ('args', info.pop('args', None)), - ('kwargs', info.pop('kwargs', None)) - ] + list(info.items()) - for key, value in infoitems: - if key is None: - continue - value = str(value) - curline = next(y) - keys = key + ': ' - self.win.addstr(curline, 3, keys, curses.A_BOLD) - wrapped = wrap(value, mx - 2) - if len(wrapped) == 1: - self.win.addstr( - curline, len(keys) + 3, - abbr(wrapped[0], - self.screen_width - (len(keys) + 3))) - else: - for subline in wrapped: - nexty = next(y) - if nexty >= my - 1: - subline = ' ' * 4 + '[...]' - elif nexty >= my: - break - self.win.addstr( - nexty, 3, - abbr(' ' * 4 + subline, self.screen_width - 4), - curses.A_NORMAL, - ) - - return self.alert( - alert_callback, 'Task details for {0.selected_task}'.format(self), - ) - - def selection_traceback(self): - if not self.selected_task: - return curses.beep() - task = self.state.tasks[self.selected_task] - if task.state not in states.EXCEPTION_STATES: - return curses.beep() - - def alert_callback(my, mx, xs): - y = count(xs) - for line in task.traceback.split('\n'): - self.win.addstr(next(y), 3, line) - - return self.alert( - alert_callback, - 'Task Exception Traceback for {0.selected_task}'.format(self), - ) - - def selection_result(self): - if not self.selected_task: - return - - def alert_callback(my, mx, xs): - y = count(xs) - task = self.state.tasks[self.selected_task] - result = (getattr(task, 'result', None) or - getattr(task, 'exception', None)) - for line in wrap(result or '', mx - 2): - self.win.addstr(next(y), 3, line) - - return 
self.alert( - alert_callback, - 'Task Result for {0.selected_task}'.format(self), - ) - - def display_task_row(self, lineno, task): - state_color = self.state_colors.get(task.state) - attr = curses.A_NORMAL - if task.uuid == self.selected_task: - attr = curses.A_STANDOUT - timestamp = datetime.utcfromtimestamp( - task.timestamp or time(), - ) - timef = timestamp.strftime('%H:%M:%S') - hostname = task.worker.hostname if task.worker else '*NONE*' - line = self.format_row(task.uuid, task.name, - hostname, - timef, task.state) - self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr) - - if state_color: - self.win.addstr(lineno, - len(line) - STATE_WIDTH + BORDER_SPACING - 1, - task.state, state_color | attr) - - def draw(self): - with self.lock: - win = self.win - self.handle_keypress() - x = LEFT_BORDER_OFFSET - y = blank_line = count(2) - my, mx = win.getmaxyx() - win.erase() - win.bkgd(' ', curses.color_pair(1)) - win.border() - win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) - next(blank_line) - win.addstr(next(y), x, self.format_row('UUID', 'TASK', - 'WORKER', 'TIME', 'STATE'), - curses.A_BOLD | curses.A_UNDERLINE) - tasks = self.tasks - if tasks: - for row, (uuid, task) in enumerate(tasks): - if row > self.display_height: - break - - if task.uuid: - lineno = next(y) - self.display_task_row(lineno, task) - - # -- Footer - next(blank_line) - win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) - - # Selected Task Info - if self.selected_task: - win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) - info = 'Missing extended info' - detail = '' - try: - selection = self.state.tasks[self.selected_task] - except KeyError: - pass - else: - info = selection.info() - if 'runtime' in info: - info['runtime'] = '{0:.2f}'.format(info['runtime']) - if 'result' in info: - info['result'] = abbr(info['result'], 16) - info = ' '.join( - '{0}={1}'.format(key, value) - for key, value in items(info) - ) - detail = '... 
-> key i' - infowin = abbr(info, - self.screen_width - len(self.selected_str) - 2, - detail) - win.addstr(my - 5, x + len(self.selected_str), infowin) - # Make ellipsis bold - if detail in infowin: - detailpos = len(infowin) - len(detail) - win.addstr(my - 5, x + len(self.selected_str) + detailpos, - detail, curses.A_BOLD) - else: - win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) - - # Workers - if self.workers: - win.addstr(my - 4, x, self.online_str, curses.A_BOLD) - win.addstr(my - 4, x + len(self.online_str), - ', '.join(sorted(self.workers)), curses.A_NORMAL) - else: - win.addstr(my - 4, x, 'No workers discovered.') - - # Info - win.addstr(my - 3, x, self.info_str, curses.A_BOLD) - win.addstr( - my - 3, x + len(self.info_str), - STATUS_SCREEN.format( - s=self.state, - w_alive=len([w for w in values(self.state.workers) - if w.alive]), - w_all=len(self.state.workers), - ), - curses.A_DIM, - ) - - # Help - self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) - self.safe_add_str(my - 2, x + len(self.help_title), self.help, - curses.A_DIM) - win.refresh() - - def safe_add_str(self, y, x, string, *args, **kwargs): - if x + len(string) > self.screen_width: - string = string[:self.screen_width - x] - self.win.addstr(y, x, string, *args, **kwargs) - - def init_screen(self): - with self.lock: - self.win = curses.initscr() - self.win.nodelay(True) - self.win.keypad(True) - curses.start_color() - curses.init_pair(1, self.foreground, self.background) - # exception states - curses.init_pair(2, curses.COLOR_RED, self.background) - # successful state - curses.init_pair(3, curses.COLOR_GREEN, self.background) - # revoked state - curses.init_pair(4, curses.COLOR_MAGENTA, self.background) - # greeting - curses.init_pair(5, curses.COLOR_BLUE, self.background) - # started state - curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) - - self.state_colors = {states.SUCCESS: curses.color_pair(3), - states.REVOKED: curses.color_pair(4), - states.STARTED: curses.color_pair(6)} - for state in states.EXCEPTION_STATES: - self.state_colors[state] = curses.color_pair(2) - - curses.cbreak() - - def resetscreen(self): - with self.lock: - curses.nocbreak() - self.win.keypad(False) - curses.echo() - curses.endwin() - - def nap(self): - curses.napms(self.screen_delay) - - @property - def tasks(self): - return list(self.state.tasks_by_time(limit=self.limit)) - - @property - def workers(self): - return [hostname for hostname, w in items(self.state.workers) - if w.alive] - - -class DisplayThread(threading.Thread): # pragma: no cover - - def __init__(self, display): - self.display = display - self.shutdown = False - threading.Thread.__init__(self) - - def run(self): - while not self.shutdown: - self.display.draw() - self.display.nap() - - -def capture_events(app, state, display): # pragma: no cover - - def on_connection_error(exc, interval): - print('Connection Error: {0!r}. 
Retry in {1}s.'.format( - exc, interval), file=sys.stderr) - - while 1: - print('-> evtop: starting capture...', file=sys.stderr) - with app.connection() as conn: - try: - conn.ensure_connection(on_connection_error, - app.conf.BROKER_CONNECTION_MAX_RETRIES) - recv = app.events.Receiver(conn, handlers={'*': state.event}) - display.resetscreen() - display.init_screen() - recv.capture() - except conn.connection_errors + conn.channel_errors as exc: - print('Connection lost: {0!r}'.format(exc), file=sys.stderr) - - -def evtop(app=None): # pragma: no cover - app = app_or_default(app) - state = app.events.State() - display = CursesMonitor(state, app) - display.init_screen() - refresher = DisplayThread(display) - refresher.start() - try: - capture_events(app, state, display) - except Exception: - refresher.shutdown = True - refresher.join() - display.resetscreen() - raise - except (KeyboardInterrupt, SystemExit): - refresher.shutdown = True - refresher.join() - display.resetscreen() - - -if __name__ == '__main__': # pragma: no cover - evtop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py b/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py deleted file mode 100644 index 323afc4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.dumper - ~~~~~~~~~~~~~~~~~~~~ - - This is a simple program that dumps events to the console - as they happen. Think of it like a `tcpdump` for Celery events. - -""" -from __future__ import absolute_import, print_function - -import sys - -from datetime import datetime - -from celery.app import app_or_default -from celery.utils.functional import LRUCache -from celery.utils.timeutils import humanize_seconds - -__all__ = ['Dumper', 'evdump'] - -TASK_NAMES = LRUCache(limit=0xFFF) - -HUMAN_TYPES = {'worker-offline': 'shutdown', - 'worker-online': 'started', - 'worker-heartbeat': 'heartbeat'} - -CONNECTION_ERROR = """\ --> Cannot connect to %s: %s. -Trying again %s -""" - - -def humanize_type(type): - try: - return HUMAN_TYPES[type.lower()] - except KeyError: - return type.lower().replace('-', ' ') - - -class Dumper(object): - - def __init__(self, out=sys.stdout): - self.out = out - - def say(self, msg): - print(msg, file=self.out) - # need to flush so that output can be piped. 
- try: - self.out.flush() - except AttributeError: - pass - - def on_event(self, ev): - timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) - type = ev.pop('type').lower() - hostname = ev.pop('hostname') - if type.startswith('task-'): - uuid = ev.pop('uuid') - if type in ('task-received', 'task-sent'): - task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \ - .format(ev.pop('name'), uuid, - ev.pop('args'), - ev.pop('kwargs')) - else: - task = TASK_NAMES.get(uuid, '') - return self.format_task_event(hostname, timestamp, - type, task, ev) - fields = ', '.join( - '{0}={1}'.format(key, ev[key]) for key in sorted(ev) - ) - sep = fields and ':' or '' - self.say('{0} [{1}] {2}{3} {4}'.format( - hostname, timestamp, humanize_type(type), sep, fields), - ) - - def format_task_event(self, hostname, timestamp, type, task, event): - fields = ', '.join( - '{0}={1}'.format(key, event[key]) for key in sorted(event) - ) - sep = fields and ':' or '' - self.say('{0} [{1}] {2}{3} {4} {5}'.format( - hostname, timestamp, humanize_type(type), sep, task, fields), - ) - - -def evdump(app=None, out=sys.stdout): - app = app_or_default(app) - dumper = Dumper(out=out) - dumper.say('-> evdump: starting capture...') - conn = app.connection().clone() - - def _error_handler(exc, interval): - dumper.say(CONNECTION_ERROR % ( - conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ') - )) - - while 1: - try: - conn.ensure_connection(_error_handler) - recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) - recv.capture() - except (KeyboardInterrupt, SystemExit): - return conn and conn.close() - except conn.connection_errors + conn.channel_errors: - dumper.say('-> Connection lost, attempting reconnect') - -if __name__ == '__main__': # pragma: no cover - evdump() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py deleted file mode 100644 index 0dd4155..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.snapshot - ~~~~~~~~~~~~~~~~~~~~~~ - - Consuming the events as a stream is not always suitable - so this module implements a system to take snapshots of the - state of a cluster at regular intervals. There is a full - implementation of this writing the snapshots to a database - in :mod:`djcelery.snapshots` in the `django-celery` distribution. 
- -""" -from __future__ import absolute_import - -from kombu.utils.limits import TokenBucket - -from celery import platforms -from celery.app import app_or_default -from celery.utils.timer2 import Timer -from celery.utils.dispatch import Signal -from celery.utils.imports import instantiate -from celery.utils.log import get_logger -from celery.utils.timeutils import rate - -__all__ = ['Polaroid', 'evcam'] - -logger = get_logger('celery.evcam') - - -class Polaroid(object): - timer = None - shutter_signal = Signal(providing_args=('state', )) - cleanup_signal = Signal() - clear_after = False - - _tref = None - _ctref = None - - def __init__(self, state, freq=1.0, maxrate=None, - cleanup_freq=3600.0, timer=None, app=None): - self.app = app_or_default(app) - self.state = state - self.freq = freq - self.cleanup_freq = cleanup_freq - self.timer = timer or self.timer or Timer() - self.logger = logger - self.maxrate = maxrate and TokenBucket(rate(maxrate)) - - def install(self): - self._tref = self.timer.call_repeatedly(self.freq, self.capture) - self._ctref = self.timer.call_repeatedly( - self.cleanup_freq, self.cleanup, - ) - - def on_shutter(self, state): - pass - - def on_cleanup(self): - pass - - def cleanup(self): - logger.debug('Cleanup: Running...') - self.cleanup_signal.send(None) - self.on_cleanup() - - def shutter(self): - if self.maxrate is None or self.maxrate.can_consume(): - logger.debug('Shutter: %s', self.state) - self.shutter_signal.send(self.state) - self.on_shutter(self.state) - - def capture(self): - self.state.freeze_while(self.shutter, clear_after=self.clear_after) - - def cancel(self): - if self._tref: - self._tref() # flush all received events. - self._tref.cancel() - if self._ctref: - self._ctref.cancel() - - def __enter__(self): - self.install() - return self - - def __exit__(self, *exc_info): - self.cancel() - - -def evcam(camera, freq=1.0, maxrate=None, loglevel=0, - logfile=None, pidfile=None, timer=None, app=None): - app = app_or_default(app) - - if pidfile: - platforms.create_pidlock(pidfile) - - app.log.setup_logging_subsystem(loglevel, logfile) - - print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format( - camera, freq)) - state = app.events.State() - cam = instantiate(camera, state, app=app, freq=freq, - maxrate=maxrate, timer=timer) - cam.install() - conn = app.connection() - recv = app.events.Receiver(conn, handlers={'*': state.event}) - try: - try: - recv.capture(limit=None) - except KeyboardInterrupt: - raise SystemExit - finally: - cam.cancel() - conn.close() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/state.py b/thesisenv/lib/python3.6/site-packages/celery/events/state.py deleted file mode 100644 index c78f2d0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/events/state.py +++ /dev/null @@ -1,656 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.events.state - ~~~~~~~~~~~~~~~~~~~ - - This module implements a datastructure used to keep - track of the state of a cluster of workers and the tasks - it is working on (by consuming events). - - For every event consumed the state is updated, - so the state represents the state of the cluster - at the time of the last event. - - Snapshots (:mod:`celery.events.snapshot`) can be used to - take "pictures" of this state at regular intervals - to e.g. store that in a database. 
- -""" -from __future__ import absolute_import - -import bisect -import sys -import threading - -from datetime import datetime -from decimal import Decimal -from itertools import islice -from operator import itemgetter -from time import time -from weakref import ref - -from kombu.clocks import timetuple -from kombu.utils import cached_property, kwdict - -from celery import states -from celery.five import class_property, items, values -from celery.utils import deprecated -from celery.utils.functional import LRUCache, memoize -from celery.utils.log import get_logger - -PYPY = hasattr(sys, 'pypy_version_info') - -# The window (in percentage) is added to the workers heartbeat -# frequency. If the time between updates exceeds this window, -# then the worker is considered to be offline. -HEARTBEAT_EXPIRE_WINDOW = 200 - -# Max drift between event timestamp and time of event received -# before we alert that clocks may be unsynchronized. -HEARTBEAT_DRIFT_MAX = 16 - -DRIFT_WARNING = """\ -Substantial drift from %s may mean clocks are out of sync. Current drift is -%s seconds. [orig: %s recv: %s] -""" - -CAN_KWDICT = sys.version_info >= (2, 6, 5) - -logger = get_logger(__name__) -warn = logger.warning - -R_STATE = '' -R_WORKER = ' HEARTBEAT_DRIFT_MAX: - _warn_drift(self.hostname, drift, - local_received, timestamp) - if local_received: - hearts = len(heartbeats) - if hearts > hbmax - 1: - hb_pop(0) - if hearts and local_received > heartbeats[-1]: - hb_append(local_received) - else: - insort(heartbeats, local_received) - return event - - def update(self, f, **kw): - for k, v in items(dict(f, **kw) if kw else f): - setattr(self, k, v) - - def __repr__(self): - return R_WORKER.format(self) - - @property - def status_string(self): - return 'ONLINE' if self.alive else 'OFFLINE' - - @property - def heartbeat_expires(self): - return heartbeat_expires(self.heartbeats[-1], - self.freq, self.expire_window) - - @property - def alive(self, nowfun=time): - return bool(self.heartbeats and nowfun() < self.heartbeat_expires) - - @property - def id(self): - return '{0.hostname}.{0.pid}'.format(self) - - @deprecated(3.2, 3.3) - def update_heartbeat(self, received, timestamp): - self.event(None, timestamp, received) - - @deprecated(3.2, 3.3) - def on_online(self, timestamp=None, local_received=None, **fields): - self.event('online', timestamp, local_received, fields) - - @deprecated(3.2, 3.3) - def on_offline(self, timestamp=None, local_received=None, **fields): - self.event('offline', timestamp, local_received, fields) - - @deprecated(3.2, 3.3) - def on_heartbeat(self, timestamp=None, local_received=None, **fields): - self.event('heartbeat', timestamp, local_received, fields) - - @class_property - def _defaults(cls): - """Deprecated, to be removed in 3.3""" - source = cls() - return dict((k, getattr(source, k)) for k in cls._fields) - - -@with_unique_field('uuid') -class Task(object): - """Task State.""" - name = received = sent = started = succeeded = failed = retried = \ - revoked = args = kwargs = eta = expires = retries = worker = result = \ - exception = timestamp = runtime = traceback = exchange = \ - routing_key = client = None - state = states.PENDING - clock = 0 - - _fields = ('uuid', 'name', 'state', 'received', 'sent', 'started', - 'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs', - 'eta', 'expires', 'retries', 'worker', 'result', 'exception', - 'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key', - 'clock', 'client') - if not PYPY: - __slots__ = ('__dict__', '__weakref__') - - #: 
How to merge out of order events. - #: Disorder is detected by logical ordering (e.g. :event:`task-received` - #: must have happened before a :event:`task-failed` event). - #: - #: A merge rule consists of a state and a list of fields to keep from - #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args - #: fields are always taken from the RECEIVED state, and any values for - #: these fields received before or after is simply ignored. - merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs', - 'retries', 'eta', 'expires')} - - #: meth:`info` displays these fields by default. - _info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime', - 'expires', 'exception', 'exchange', 'routing_key') - - def __init__(self, uuid=None, **kwargs): - self.uuid = uuid - if kwargs: - for k, v in items(kwargs): - setattr(self, k, v) - - def event(self, type_, timestamp=None, local_received=None, fields=None, - precedence=states.precedence, items=items, dict=dict, - PENDING=states.PENDING, RECEIVED=states.RECEIVED, - STARTED=states.STARTED, FAILURE=states.FAILURE, - RETRY=states.RETRY, SUCCESS=states.SUCCESS, - REVOKED=states.REVOKED): - fields = fields or {} - if type_ == 'sent': - state, self.sent = PENDING, timestamp - elif type_ == 'received': - state, self.received = RECEIVED, timestamp - elif type_ == 'started': - state, self.started = STARTED, timestamp - elif type_ == 'failed': - state, self.failed = FAILURE, timestamp - elif type_ == 'retried': - state, self.retried = RETRY, timestamp - elif type_ == 'succeeded': - state, self.succeeded = SUCCESS, timestamp - elif type_ == 'revoked': - state, self.revoked = REVOKED, timestamp - else: - state = type_.upper() - - # note that precedence here is reversed - # see implementation in celery.states.state.__lt__ - if state != RETRY and self.state != RETRY and \ - precedence(state) > precedence(self.state): - # this state logically happens-before the current state, so merge. 
- keep = self.merge_rules.get(state) - if keep is not None: - fields = dict( - (k, v) for k, v in items(fields) if k in keep - ) - for key, value in items(fields): - setattr(self, key, value) - else: - self.state = state - self.timestamp = timestamp - for key, value in items(fields): - setattr(self, key, value) - - def info(self, fields=None, extra=[]): - """Information about this task suitable for on-screen display.""" - fields = self._info_fields if fields is None else fields - - def _keys(): - for key in list(fields) + list(extra): - value = getattr(self, key, None) - if value is not None: - yield key, value - - return dict(_keys()) - - def __repr__(self): - return R_TASK.format(self) - - def as_dict(self): - get = object.__getattribute__ - return dict( - (k, get(self, k)) for k in self._fields - ) - - def __reduce__(self): - return _depickle_task, (self.__class__, self.as_dict()) - - @property - def origin(self): - return self.client if self.worker is None else self.worker.id - - @property - def ready(self): - return self.state in states.READY_STATES - - @deprecated(3.2, 3.3) - def on_sent(self, timestamp=None, **fields): - self.event('sent', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_received(self, timestamp=None, **fields): - self.event('received', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_started(self, timestamp=None, **fields): - self.event('started', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_failed(self, timestamp=None, **fields): - self.event('failed', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_retried(self, timestamp=None, **fields): - self.event('retried', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_succeeded(self, timestamp=None, **fields): - self.event('succeeded', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_revoked(self, timestamp=None, **fields): - self.event('revoked', timestamp, fields) - - @deprecated(3.2, 3.3) - def on_unknown_event(self, shortype, timestamp=None, **fields): - self.event(shortype, timestamp, fields) - - @deprecated(3.2, 3.3) - def update(self, state, timestamp, fields, - _state=states.state, RETRY=states.RETRY): - return self.event(state, timestamp, None, fields) - - @deprecated(3.2, 3.3) - def merge(self, state, timestamp, fields): - keep = self.merge_rules.get(state) - if keep is not None: - fields = dict((k, v) for k, v in items(fields) if k in keep) - for key, value in items(fields): - setattr(self, key, value) - - @class_property - def _defaults(cls): - """Deprecated, to be removed in 3.3.""" - source = cls() - return dict((k, getattr(source, k)) for k in source._fields) - - -class State(object): - """Records clusters state.""" - Worker = Worker - Task = Task - event_count = 0 - task_count = 0 - heap_multiplier = 4 - - def __init__(self, callback=None, - workers=None, tasks=None, taskheap=None, - max_workers_in_memory=5000, max_tasks_in_memory=10000, - on_node_join=None, on_node_leave=None): - self.event_callback = callback - self.workers = (LRUCache(max_workers_in_memory) - if workers is None else workers) - self.tasks = (LRUCache(max_tasks_in_memory) - if tasks is None else tasks) - self._taskheap = [] if taskheap is None else taskheap - self.max_workers_in_memory = max_workers_in_memory - self.max_tasks_in_memory = max_tasks_in_memory - self.on_node_join = on_node_join - self.on_node_leave = on_node_leave - self._mutex = threading.Lock() - self.handlers = {} - self._seen_types = set() - self.rebuild_taskheap() - - @cached_property - def _event(self): - return 
self._create_dispatcher() - - def freeze_while(self, fun, *args, **kwargs): - clear_after = kwargs.pop('clear_after', False) - with self._mutex: - try: - return fun(*args, **kwargs) - finally: - if clear_after: - self._clear() - - def clear_tasks(self, ready=True): - with self._mutex: - return self._clear_tasks(ready) - - def _clear_tasks(self, ready=True): - if ready: - in_progress = dict( - (uuid, task) for uuid, task in self.itertasks() - if task.state not in states.READY_STATES) - self.tasks.clear() - self.tasks.update(in_progress) - else: - self.tasks.clear() - self._taskheap[:] = [] - - def _clear(self, ready=True): - self.workers.clear() - self._clear_tasks(ready) - self.event_count = 0 - self.task_count = 0 - - def clear(self, ready=True): - with self._mutex: - return self._clear(ready) - - def get_or_create_worker(self, hostname, **kwargs): - """Get or create worker by hostname. - - Return tuple of ``(worker, was_created)``. - """ - try: - worker = self.workers[hostname] - if kwargs: - worker.update(kwargs) - return worker, False - except KeyError: - worker = self.workers[hostname] = self.Worker( - hostname, **kwargs) - return worker, True - - def get_or_create_task(self, uuid): - """Get or create task by uuid.""" - try: - return self.tasks[uuid], False - except KeyError: - task = self.tasks[uuid] = self.Task(uuid) - return task, True - - def event(self, event): - with self._mutex: - return self._event(event) - - def task_event(self, type_, fields): - """Deprecated, use :meth:`event`.""" - return self._event(dict(fields, type='-'.join(['task', type_])))[0] - - def worker_event(self, type_, fields): - """Deprecated, use :meth:`event`.""" - return self._event(dict(fields, type='-'.join(['worker', type_])))[0] - - def _create_dispatcher(self): - get_handler = self.handlers.__getitem__ - event_callback = self.event_callback - wfields = itemgetter('hostname', 'timestamp', 'local_received') - tfields = itemgetter('uuid', 'hostname', 'timestamp', - 'local_received', 'clock') - taskheap = self._taskheap - th_append = taskheap.append - th_pop = taskheap.pop - # Removing events from task heap is an O(n) operation, - # so easier to just account for the common number of events - # for each task (PENDING->RECEIVED->STARTED->final) - #: an O(n) operation - max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier - add_type = self._seen_types.add - on_node_join, on_node_leave = self.on_node_join, self.on_node_leave - tasks, Task = self.tasks, self.Task - workers, Worker = self.workers, self.Worker - # avoid updating LRU entry at getitem - get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ - - def _event(event, - timetuple=timetuple, KeyError=KeyError, - insort=bisect.insort, created=True): - self.event_count += 1 - if event_callback: - event_callback(self, event) - group, _, subject = event['type'].partition('-') - try: - handler = get_handler(group) - except KeyError: - pass - else: - return handler(subject, event), subject - - if group == 'worker': - try: - hostname, timestamp, local_received = wfields(event) - except KeyError: - pass - else: - is_offline = subject == 'offline' - try: - worker, created = get_worker(hostname), False - except KeyError: - if is_offline: - worker, created = Worker(hostname), False - else: - worker = workers[hostname] = Worker(hostname) - worker.event(subject, timestamp, local_received, event) - if on_node_join and (created or subject == 'online'): - on_node_join(worker) - if on_node_leave and is_offline: - on_node_leave(worker) - 
workers.pop(hostname, None) - return (worker, created), subject - elif group == 'task': - (uuid, hostname, timestamp, - local_received, clock) = tfields(event) - # task-sent event is sent by client, not worker - is_client_event = subject == 'sent' - try: - task, created = get_task(uuid), False - except KeyError: - task = tasks[uuid] = Task(uuid) - if is_client_event: - task.client = hostname - else: - try: - worker, created = get_worker(hostname), False - except KeyError: - worker = workers[hostname] = Worker(hostname) - task.worker = worker - if worker is not None and local_received: - worker.event(None, local_received, timestamp) - - origin = hostname if is_client_event else worker.id - - # remove oldest event if exceeding the limit. - heaps = len(taskheap) - if heaps + 1 > max_events_in_heap: - th_pop(0) - - # most events will be dated later than the previous. - timetup = timetuple(clock, timestamp, origin, ref(task)) - if heaps and timetup > taskheap[-1]: - th_append(timetup) - else: - insort(taskheap, timetup) - - if subject == 'received': - self.task_count += 1 - task.event(subject, timestamp, local_received, event) - task_name = task.name - if task_name is not None: - add_type(task_name) - return (task, created), subject - return _event - - def rebuild_taskheap(self, timetuple=timetuple): - heap = self._taskheap[:] = [ - timetuple(t.clock, t.timestamp, t.origin, ref(t)) - for t in values(self.tasks) - ] - heap.sort() - - def itertasks(self, limit=None): - for index, row in enumerate(items(self.tasks)): - yield row - if limit and index + 1 >= limit: - break - - def tasks_by_time(self, limit=None): - """Generator giving tasks ordered by time, - in ``(uuid, Task)`` tuples.""" - seen = set() - for evtup in islice(reversed(self._taskheap), 0, limit): - task = evtup[3]() - if task is not None: - uuid = task.uuid - if uuid not in seen: - yield uuid, task - seen.add(uuid) - tasks_by_timestamp = tasks_by_time - - def tasks_by_type(self, name, limit=None): - """Get all tasks by type. - - Return a list of ``(uuid, Task)`` tuples. - - """ - return islice( - ((uuid, task) for uuid, task in self.tasks_by_time() - if task.name == name), - 0, limit, - ) - - def tasks_by_worker(self, hostname, limit=None): - """Get all tasks by worker. - - """ - return islice( - ((uuid, task) for uuid, task in self.tasks_by_time() - if task.worker.hostname == hostname), - 0, limit, - ) - - def task_types(self): - """Return a list of all seen task types.""" - return sorted(self._seen_types) - - def alive_workers(self): - """Return a list of (seemingly) alive workers.""" - return [w for w in values(self.workers) if w.alive] - - def __repr__(self): - return R_STATE.format(self) - - def __reduce__(self): - return self.__class__, ( - self.event_callback, self.workers, self.tasks, None, - self.max_workers_in_memory, self.max_tasks_in_memory, - self.on_node_join, self.on_node_leave, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/exceptions.py deleted file mode 100644 index ab65019..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/exceptions.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.exceptions - ~~~~~~~~~~~~~~~~~ - - This module contains all exceptions used by the Celery API. 
- -""" -from __future__ import absolute_import - -import numbers - -from .five import string_t - -from billiard.exceptions import ( # noqa - SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, -) - -__all__ = ['SecurityError', 'Ignore', 'QueueNotFound', - 'WorkerShutdown', 'WorkerTerminate', - 'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered', - 'TimeoutError', 'MaxRetriesExceededError', 'Retry', - 'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored', - 'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning', - 'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning', - 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', - 'Terminated'] - -UNREGISTERED_FMT = """\ -Task of kind {0} is not registered, please make sure it's imported.\ -""" - - -class SecurityError(Exception): - """Security related exceptions. - - Handle with care. - - """ - - -class Ignore(Exception): - """A task can raise this to ignore doing state updates.""" - - -class Reject(Exception): - """A task can raise this if it wants to reject/requeue the message.""" - - def __init__(self, reason=None, requeue=False): - self.reason = reason - self.requeue = requeue - super(Reject, self).__init__(reason, requeue) - - def __repr__(self): - return 'reject requeue=%s: %s' % (self.requeue, self.reason) - - -class WorkerTerminate(SystemExit): - """Signals that the worker should terminate immediately.""" -SystemTerminate = WorkerTerminate # XXX compat - - -class WorkerShutdown(SystemExit): - """Signals that the worker should perform a warm shutdown.""" - - -class QueueNotFound(KeyError): - """Task routed to a queue not in CELERY_QUEUES.""" - - -class ImproperlyConfigured(ImportError): - """Celery is somehow improperly configured.""" - - -class NotRegistered(KeyError): - """The task is not registered.""" - - def __repr__(self): - return UNREGISTERED_FMT.format(self) - - -class AlreadyRegistered(Exception): - """The task is already registered.""" - - -class TimeoutError(Exception): - """The operation timed out.""" - - -class MaxRetriesExceededError(Exception): - """The tasks max restart limit has been exceeded.""" - - -class Retry(Exception): - """The task is to be retried later.""" - - #: Optional message describing context of retry. - message = None - - #: Exception (if any) that caused the retry to happen. - exc = None - - #: Time of retry (ETA), either :class:`numbers.Real` or - #: :class:`~datetime.datetime`. 
- when = None - - def __init__(self, message=None, exc=None, when=None, **kwargs): - from kombu.utils.encoding import safe_repr - self.message = message - if isinstance(exc, string_t): - self.exc, self.excs = None, exc - else: - self.exc, self.excs = exc, safe_repr(exc) if exc else None - self.when = when - Exception.__init__(self, exc, when, **kwargs) - - def humanize(self): - if isinstance(self.when, numbers.Real): - return 'in {0.when}s'.format(self) - return 'at {0.when}'.format(self) - - def __str__(self): - if self.message: - return self.message - if self.excs: - return 'Retry {0}: {1}'.format(self.humanize(), self.excs) - return 'Retry {0}'.format(self.humanize()) - - def __reduce__(self): - return self.__class__, (self.message, self.excs, self.when) -RetryTaskError = Retry # XXX compat - - -class TaskRevokedError(Exception): - """The task has been revoked, so no result available.""" - - -class NotConfigured(UserWarning): - """Celery has not been configured, as no config module has been found.""" - - -class AlwaysEagerIgnored(UserWarning): - """send_task ignores CELERY_ALWAYS_EAGER option""" - - -class InvalidTaskError(Exception): - """The task has invalid data or is not properly constructed.""" - - -class IncompleteStream(Exception): - """Found the end of a stream of data, but the data is not yet complete.""" - - -class ChordError(Exception): - """A task part of the chord raised an exception.""" - - -class CPendingDeprecationWarning(PendingDeprecationWarning): - pass - - -class CDeprecationWarning(DeprecationWarning): - pass - - -class FixupWarning(UserWarning): - pass - - -class DuplicateNodenameWarning(UserWarning): - """Multiple workers are using the same nodename.""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/five.py b/thesisenv/lib/python3.6/site-packages/celery/five.py deleted file mode 100644 index 2406920..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/five.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.five - ~~~~~~~~~~~ - - Compatibility implementations of features - only available in newer Python versions. 
- - -""" -from __future__ import absolute_import - -import io -import operator -import sys - -from importlib import import_module -from types import ModuleType - -from kombu.five import monotonic - -try: - from collections import Counter -except ImportError: # pragma: no cover - from collections import defaultdict - - def Counter(): # noqa - return defaultdict(int) - -__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty', - 'zip_longest', 'map', 'string', 'string_t', - 'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values', - 'nextfun', 'reraise', 'WhateverIO', 'with_metaclass', - 'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d', - 'class_property', 'reclassmethod', 'create_module', - 'recreate_module', 'monotonic'] - -# ############# py3k ######################################################### -PY3 = sys.version_info[0] == 3 - -try: - reload = reload # noqa -except NameError: # pragma: no cover - from imp import reload # noqa - -try: - from UserList import UserList # noqa -except ImportError: # pragma: no cover - from collections import UserList # noqa - -try: - from UserDict import UserDict # noqa -except ImportError: # pragma: no cover - from collections import UserDict # noqa - - -if PY3: # pragma: no cover - import builtins - - from queue import Queue, Empty - from itertools import zip_longest - - map = map - string = str - string_t = str - long_t = int - text_t = str - range = range - int_types = (int, ) - _byte_t = bytes - - open_fqdn = 'builtins.open' - - def items(d): - return d.items() - - def keys(d): - return d.keys() - - def values(d): - return d.values() - - def nextfun(it): - return it.__next__ - - exec_ = getattr(builtins, 'exec') - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - import __builtin__ as builtins # noqa - from Queue import Queue, Empty # noqa - from itertools import imap as map, izip_longest as zip_longest # noqa - string = unicode # noqa - string_t = basestring # noqa - text_t = unicode # noqa - long_t = long # noqa - range = xrange # noqa - int_types = (int, long) # noqa - _byte_t = (str, bytes) # noqa - - open_fqdn = '__builtin__.open' - - def items(d): # noqa - return d.iteritems() - - def keys(d): # noqa - return d.iterkeys() - - def values(d): # noqa - return d.itervalues() - - def nextfun(it): # noqa - return it.next - - def exec_(code, globs=None, locs=None): # pragma: no cover - """Execute code in a namespace.""" - if globs is None: - frame = sys._getframe(1) - globs = frame.f_globals - if locs is None: - locs = frame.f_locals - del frame - elif locs is None: - locs = globs - exec("""exec code in globs, locs""") - - exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") - - -def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): - """Class decorator to set metaclass. - - Works with both Python 2 and Python 3 and it does not add - an extra class in the lookup order like ``six.with_metaclass`` does - (that is -- it copies the original class instead of using inheritance). 
- - """ - - def _clone_with_metaclass(Class): - attrs = dict((key, value) for key, value in items(vars(Class)) - if key not in skip_attrs) - return Type(Class.__name__, Class.__bases__, attrs) - - return _clone_with_metaclass - - -# ############# collections.OrderedDict ###################################### -# was moved to kombu -from kombu.utils.compat import OrderedDict # noqa - -# ############# threading.TIMEOUT_MAX ######################################## -try: - from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX -except ImportError: - THREAD_TIMEOUT_MAX = 1e10 # noqa - -# ############# format(int, ',d') ############################################ - -if sys.version_info >= (2, 7): # pragma: no cover - def format_d(i): - return format(i, ',d') -else: # pragma: no cover - def format_d(i): # noqa - s = '%d' % i - groups = [] - while s and s[-1].isdigit(): - groups.append(s[-3:]) - s = s[:-3] - return s + ','.join(reversed(groups)) - - -# ############# Module Generation ############################################ - -# Utilities to dynamically -# recreate modules, either for lazy loading or -# to create old modules at runtime instead of -# having them litter the source tree. - -# import fails in python 2.5. fallback to reduce in stdlib -try: - from functools import reduce -except ImportError: - pass - -MODULE_DEPRECATED = """ -The module %s is deprecated and will be removed in a future version. -""" - -DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__']) - -# im_func is no longer available in Py3. -# instead the unbound method itself can be used. -if sys.version_info[0] == 3: # pragma: no cover - def fun_of_method(method): - return method -else: - def fun_of_method(method): # noqa - return method.im_func - - -def getappattr(path): - """Gets attribute from the current_app recursively, - e.g. 
getappattr('amqp.get_task_consumer')``.""" - from celery import current_app - return current_app._rgetattr(path) - - -def _compat_task_decorator(*args, **kwargs): - from celery import current_app - kwargs.setdefault('accept_magic_kwargs', True) - return current_app.task(*args, **kwargs) - - -def _compat_periodic_task_decorator(*args, **kwargs): - from celery.task import periodic_task - kwargs.setdefault('accept_magic_kwargs', True) - return periodic_task(*args, **kwargs) - - -COMPAT_MODULES = { - 'celery': { - 'execute': { - 'send_task': 'send_task', - }, - 'decorators': { - 'task': _compat_task_decorator, - 'periodic_task': _compat_periodic_task_decorator, - }, - 'log': { - 'get_default_logger': 'log.get_default_logger', - 'setup_logger': 'log.setup_logger', - 'setup_logging_subsystem': 'log.setup_logging_subsystem', - 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', - }, - 'messaging': { - 'TaskPublisher': 'amqp.TaskPublisher', - 'TaskConsumer': 'amqp.TaskConsumer', - 'establish_connection': 'connection', - 'get_consumer_set': 'amqp.TaskConsumer', - }, - 'registry': { - 'tasks': 'tasks', - }, - }, - 'celery.task': { - 'control': { - 'broadcast': 'control.broadcast', - 'rate_limit': 'control.rate_limit', - 'time_limit': 'control.time_limit', - 'ping': 'control.ping', - 'revoke': 'control.revoke', - 'discard_all': 'control.purge', - 'inspect': 'control.inspect', - }, - 'schedules': 'celery.schedules', - 'chords': 'celery.canvas', - } -} - - -class class_property(object): - - def __init__(self, getter=None, setter=None): - if getter is not None and not isinstance(getter, classmethod): - getter = classmethod(getter) - if setter is not None and not isinstance(setter, classmethod): - setter = classmethod(setter) - self.__get = getter - self.__set = setter - - info = getter.__get__(object) # just need the info attrs. 
- self.__doc__ = info.__doc__ - self.__name__ = info.__name__ - self.__module__ = info.__module__ - - def __get__(self, obj, type=None): - if obj and type is None: - type = obj.__class__ - return self.__get.__get__(obj, type)() - - def __set__(self, obj, value): - if obj is None: - return self - return self.__set.__get__(obj)(value) - - def setter(self, setter): - return self.__class__(self.__get, setter) - - -def reclassmethod(method): - return classmethod(fun_of_method(method)) - - -class LazyModule(ModuleType): - _compat_modules = () - _all_by_module = {} - _direct = {} - _object_origins = {} - - def __getattr__(self, name): - if name in self._object_origins: - module = __import__(self._object_origins[name], None, None, [name]) - for item in self._all_by_module[module.__name__]: - setattr(self, item, getattr(module, item)) - return getattr(module, name) - elif name in self._direct: # pragma: no cover - module = __import__(self._direct[name], None, None, [name]) - setattr(self, name, module) - return module - return ModuleType.__getattribute__(self, name) - - def __dir__(self): - return list(set(self.__all__) | DEFAULT_ATTRS) - - def __reduce__(self): - return import_module, (self.__name__, ) - - -def create_module(name, attrs, cls_attrs=None, pkg=None, - base=LazyModule, prepare_attr=None): - fqdn = '.'.join([pkg.__name__, name]) if pkg else name - cls_attrs = {} if cls_attrs is None else cls_attrs - pkg, _, modname = name.rpartition('.') - cls_attrs['__module__'] = pkg - - attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr) - for attr_name, attr in items(attrs)) - module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn) - module.__dict__.update(attrs) - return module - - -def recreate_module(name, compat_modules=(), by_module={}, direct={}, - base=LazyModule, **attrs): - old_module = sys.modules[name] - origins = get_origins(by_module) - compat_modules = COMPAT_MODULES.get(name, ()) - - cattrs = dict( - _compat_modules=compat_modules, - _all_by_module=by_module, _direct=direct, - _object_origins=origins, - __all__=tuple(set(reduce( - operator.add, - [tuple(v) for v in [compat_modules, origins, direct, attrs]], - ))), - ) - new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) - new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod)) - for mod in compat_modules)) - return old_module, new_module - - -def get_compat_module(pkg, name): - from .local import Proxy - - def prepare(attr): - if isinstance(attr, string_t): - return Proxy(getappattr, (attr, )) - return attr - - attrs = COMPAT_MODULES[pkg.__name__][name] - if isinstance(attrs, string_t): - fqdn = '.'.join([pkg.__name__, name]) - module = sys.modules[fqdn] = import_module(attrs) - return module - attrs['__all__'] = list(attrs) - return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) - - -def get_origins(defs): - origins = {} - for module, attrs in items(defs): - origins.update(dict((attr, module) for attr in attrs)) - return origins - - -_SIO_write = io.StringIO.write -_SIO_init = io.StringIO.__init__ - - -class WhateverIO(io.StringIO): - - def __init__(self, v=None, *a, **kw): - _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw) - - def write(self, data): - _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data) diff --git a/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py b/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py deleted file mode 100644 index 73c5c28..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py +++ /dev/null @@ -1,266 +0,0 @@ -from __future__ import absolute_import - -import os -import sys -import warnings - -from kombu.utils import cached_property, symbol_by_name - -from datetime import datetime -from importlib import import_module - -from celery import signals -from celery.exceptions import FixupWarning - -if sys.version_info[0] < 3 and not hasattr(sys, 'pypy_version_info'): - from StringIO import StringIO -else: - from io import StringIO - -__all__ = ['DjangoFixup', 'fixup'] - -ERR_NOT_INSTALLED = """\ -Environment variable DJANGO_SETTINGS_MODULE is defined -but Django is not installed. Will not apply Django fixups! -""" - - -def _maybe_close_fd(fh): - try: - os.close(fh.fileno()) - except (AttributeError, OSError, TypeError): - # TypeError added for celery#962 - pass - - -def fixup(app, env='DJANGO_SETTINGS_MODULE'): - SETTINGS_MODULE = os.environ.get(env) - if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): - try: - import django # noqa - except ImportError: - warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) - else: - return DjangoFixup(app).install() - - -class DjangoFixup(object): - - def __init__(self, app): - self.app = app - self.app.set_default() - self._worker_fixup = None - - def install(self): - # Need to add project directory to path - sys.path.append(os.getcwd()) - - self.app.loader.now = self.now - self.app.loader.mail_admins = self.mail_admins - - signals.import_modules.connect(self.on_import_modules) - signals.worker_init.connect(self.on_worker_init) - return self - - @cached_property - def worker_fixup(self): - if self._worker_fixup is None: - self._worker_fixup = DjangoWorkerFixup(self.app) - return self._worker_fixup - - def on_import_modules(self, **kwargs): - # call django.setup() before task modules are imported - self.worker_fixup.validate_models() - - def on_worker_init(self, **kwargs): - self.worker_fixup.install() - - def now(self, utc=False): - return datetime.utcnow() if utc else self._now() - - def mail_admins(self, subject, body, fail_silently=False, **kwargs): - return self._mail_admins(subject, body, fail_silently=fail_silently) - - @cached_property - def _mail_admins(self): - return symbol_by_name('django.core.mail:mail_admins') - - @cached_property - def _now(self): - try: - return symbol_by_name('django.utils.timezone:now') - except (AttributeError, ImportError): # pre django-1.4 - return datetime.now - - -class DjangoWorkerFixup(object): - _db_recycles = 0 - - def __init__(self, app): - self.app = app - self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) - self._db = import_module('django.db') - self._cache = import_module('django.core.cache') - self._settings = symbol_by_name('django.conf:settings') - - # Database-related exceptions. 
- DatabaseError = symbol_by_name('django.db:DatabaseError') - try: - import MySQLdb as mysql - _my_database_errors = (mysql.DatabaseError, - mysql.InterfaceError, - mysql.OperationalError) - except ImportError: - _my_database_errors = () # noqa - try: - import psycopg2 as pg - _pg_database_errors = (pg.DatabaseError, - pg.InterfaceError, - pg.OperationalError) - except ImportError: - _pg_database_errors = () # noqa - try: - import sqlite3 - _lite_database_errors = (sqlite3.DatabaseError, - sqlite3.InterfaceError, - sqlite3.OperationalError) - except ImportError: - _lite_database_errors = () # noqa - try: - import cx_Oracle as oracle - _oracle_database_errors = (oracle.DatabaseError, - oracle.InterfaceError, - oracle.OperationalError) - except ImportError: - _oracle_database_errors = () # noqa - - try: - self._close_old_connections = symbol_by_name( - 'django.db:close_old_connections', - ) - except (ImportError, AttributeError): - self._close_old_connections = None - self.database_errors = ( - (DatabaseError, ) + - _my_database_errors + - _pg_database_errors + - _lite_database_errors + - _oracle_database_errors - ) - - def validate_models(self): - import django - try: - django_setup = django.setup - except AttributeError: - pass - else: - django_setup() - s = StringIO() - try: - from django.core.management.validation import get_validation_errors - except ImportError: - from django.core.management.base import BaseCommand - cmd = BaseCommand() - try: - # since django 1.5 - from django.core.management.base import OutputWrapper - cmd.stdout = OutputWrapper(sys.stdout) - cmd.stderr = OutputWrapper(sys.stderr) - except ImportError: - cmd.stdout, cmd.stderr = sys.stdout, sys.stderr - - cmd.check() - else: - num_errors = get_validation_errors(s, None) - if num_errors: - raise RuntimeError( - 'One or more Django models did not validate:\n{0}'.format( - s.getvalue())) - - def install(self): - signals.beat_embedded_init.connect(self.close_database) - signals.worker_ready.connect(self.on_worker_ready) - signals.task_prerun.connect(self.on_task_prerun) - signals.task_postrun.connect(self.on_task_postrun) - signals.worker_process_init.connect(self.on_worker_process_init) - self.close_database() - self.close_cache() - return self - - def on_worker_process_init(self, **kwargs): - # Child process must validate models again if on Windows, - # or if they were started using execv. - if os.environ.get('FORKED_BY_MULTIPROCESSING'): - self.validate_models() - - # close connections: - # the parent process may have established these, - # so need to close them. - - # calling db.close() on some DB connections will cause - # the inherited DB conn to also get broken in the parent - # process so we need to remove it without triggering any - # network IO that close() might cause. 
- try: - for c in self._db.connections.all(): - if c and c.connection: - _maybe_close_fd(c.connection) - except AttributeError: - if self._db.connection and self._db.connection.connection: - _maybe_close_fd(self._db.connection.connection) - - # use the _ version to avoid DB_REUSE preventing the conn.close() call - self._close_database() - self.close_cache() - - def on_task_prerun(self, sender, **kwargs): - """Called before every task.""" - if not getattr(sender.request, 'is_eager', False): - self.close_database() - - def on_task_postrun(self, sender, **kwargs): - # See http://groups.google.com/group/django-users/ - # browse_thread/thread/78200863d0c07c6d/ - if not getattr(sender.request, 'is_eager', False): - self.close_database() - self.close_cache() - - def close_database(self, **kwargs): - if self._close_old_connections: - return self._close_old_connections() # Django 1.6 - if not self.db_reuse_max: - return self._close_database() - if self._db_recycles >= self.db_reuse_max * 2: - self._db_recycles = 0 - self._close_database() - self._db_recycles += 1 - - def _close_database(self): - try: - funs = [conn.close for conn in self._db.connections.all()] - except AttributeError: - if hasattr(self._db, 'close_old_connections'): # django 1.6 - funs = [self._db.close_old_connections] - else: - # pre multidb, pending deprication in django 1.6 - funs = [self._db.close_connection] - - for close in funs: - try: - close() - except self.database_errors as exc: - str_exc = str(exc) - if 'closed' not in str_exc and 'not connected' not in str_exc: - raise - - def close_cache(self): - try: - self._cache.cache.close() - except (TypeError, AttributeError): - pass - - def on_worker_ready(self, **kwargs): - if self._settings.DEBUG: - warnings.warn('Using settings.DEBUG leads to a memory leak, never ' - 'use this setting in production environments!') diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py deleted file mode 100644 index 2a39ba2..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders - ~~~~~~~~~~~~~~ - - Loaders define how configuration is read, what happens - when workers start, when tasks are executed and so on. - -""" -from __future__ import absolute_import - -from celery._state import current_app -from celery.utils import deprecated -from celery.utils.imports import symbol_by_name, import_from_cwd - -__all__ = ['get_loader_cls'] - -LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader', - 'default': 'celery.loaders.default:Loader', - 'django': 'djcelery.loaders:DjangoLoader'} - - -def get_loader_cls(loader): - """Get loader class by name/alias""" - return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd) - - -@deprecated(deprecation=2.5, removal=4.0, - alternative='celery.current_app.loader') -def current_loader(): - return current_app.loader - - -@deprecated(deprecation=2.5, removal=4.0, - alternative='celery.current_app.conf') -def load_settings(): - return current_app.conf diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py deleted file mode 100644 index 87f034b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.app - ~~~~~~~~~~~~~~~~~~ - - The default loader used with custom app instances. 
- -""" -from __future__ import absolute_import - -from .base import BaseLoader - -__all__ = ['AppLoader'] - - -class AppLoader(BaseLoader): - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py deleted file mode 100644 index 401be7b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py +++ /dev/null @@ -1,299 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.base - ~~~~~~~~~~~~~~~~~~~ - - Loader base class. - -""" -from __future__ import absolute_import - -import anyjson -import imp as _imp -import importlib -import os -import re -import sys - -from datetime import datetime - -from kombu.utils import cached_property -from kombu.utils.encoding import safe_str - -from celery import signals -from celery.datastructures import DictAttribute, force_mapping -from celery.five import reraise, string_t -from celery.utils.functional import maybe_list -from celery.utils.imports import ( - import_from_cwd, symbol_by_name, NotAPackage, find_module, -) - -__all__ = ['BaseLoader'] - -_RACE_PROTECTION = False -CONFIG_INVALID_NAME = """\ -Error: Module '{module}' doesn't exist, or it's not a valid \ -Python module name. -""" - -CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ -Did you mean '{suggest}'? -""" - - -class BaseLoader(object): - """The base class for loaders. - - Loaders handles, - - * Reading celery client/worker configurations. - - * What happens when a task starts? - See :meth:`on_task_init`. - - * What happens when the worker starts? - See :meth:`on_worker_init`. - - * What happens when the worker shuts down? - See :meth:`on_worker_shutdown`. - - * What modules are imported to find tasks? - - """ - builtin_modules = frozenset() - configured = False - override_backends = {} - worker_initialized = False - - _conf = None - - def __init__(self, app, **kwargs): - self.app = app - self.task_modules = set() - - def now(self, utc=True): - if utc: - return datetime.utcnow() - return datetime.now() - - def on_task_init(self, task_id, task): - """This method is called before a task is executed.""" - pass - - def on_process_cleanup(self): - """This method is called after a task is executed.""" - pass - - def on_worker_init(self): - """This method is called when the worker (:program:`celery worker`) - starts.""" - pass - - def on_worker_shutdown(self): - """This method is called when the worker (:program:`celery worker`) - shuts down.""" - pass - - def on_worker_process_init(self): - """This method is called when a child process starts.""" - pass - - def import_task_module(self, module): - self.task_modules.add(module) - return self.import_from_cwd(module) - - def import_module(self, module, package=None): - return importlib.import_module(module, package=package) - - def import_from_cwd(self, module, imp=None, package=None): - return import_from_cwd( - module, - self.import_module if imp is None else imp, - package=package, - ) - - def import_default_modules(self): - signals.import_modules.send(sender=self.app) - return [ - self.import_task_module(m) for m in ( - tuple(self.builtin_modules) + - tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) + - tuple(maybe_list(self.app.conf.CELERY_INCLUDE)) - ) - ] - - def init_worker(self): - if not self.worker_initialized: - self.worker_initialized = True - self.import_default_modules() - self.on_worker_init() - - def shutdown_worker(self): - self.on_worker_shutdown() - - def init_worker_process(self): - self.on_worker_process_init() - - def 
config_from_object(self, obj, silent=False): - if isinstance(obj, string_t): - try: - obj = self._smart_import(obj, imp=self.import_from_cwd) - except (ImportError, AttributeError): - if silent: - return False - raise - self._conf = force_mapping(obj) - return True - - def _smart_import(self, path, imp=None): - imp = self.import_module if imp is None else imp - if ':' in path: - # Path includes attribute so can just jump here. - # e.g. ``os.path:abspath``. - return symbol_by_name(path, imp=imp) - - # Not sure if path is just a module name or if it includes an - # attribute name (e.g. ``os.path``, vs, ``os.path.abspath``). - try: - return imp(path) - except ImportError: - # Not a module name, so try module + attribute. - return symbol_by_name(path, imp=imp) - - def _import_config_module(self, name): - try: - self.find_module(name) - except NotAPackage: - if name.endswith('.py'): - reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( - module=name, suggest=name[:-3])), sys.exc_info()[2]) - reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format( - module=name)), sys.exc_info()[2]) - else: - return self.import_from_cwd(name) - - def find_module(self, module): - return find_module(module) - - def cmdline_config_parser( - self, args, namespace='celery', - re_type=re.compile(r'\((\w+)\)'), - extra_types={'json': anyjson.loads}, - override_types={'tuple': 'json', - 'list': 'json', - 'dict': 'json'}): - from celery.app.defaults import Option, NAMESPACES - namespace = namespace.upper() - typemap = dict(Option.typemap, **extra_types) - - def getarg(arg): - """Parse a single configuration definition from - the command-line.""" - - # ## find key/value - # ns.key=value|ns_key=value (case insensitive) - key, value = arg.split('=', 1) - key = key.upper().replace('.', '_') - - # ## find namespace. - # .key=value|_key=value expands to default namespace. - if key[0] == '_': - ns, key = namespace, key[1:] - else: - # find namespace part of key - ns, key = key.split('_', 1) - - ns_key = (ns and ns + '_' or '') + key - - # (type)value makes cast to custom type. - cast = re_type.match(value) - if cast: - type_ = cast.groups()[0] - type_ = override_types.get(type_, type_) - value = value[len(cast.group()):] - value = typemap[type_](value) - else: - try: - value = NAMESPACES[ns][key].to_python(value) - except ValueError as exc: - # display key name in error message. 
- raise ValueError('{0!r}: {1}'.format(ns_key, exc)) - return ns_key, value - return dict(getarg(arg) for arg in args) - - def mail_admins(self, subject, body, fail_silently=False, - sender=None, to=None, host=None, port=None, - user=None, password=None, timeout=None, - use_ssl=False, use_tls=False, charset='utf-8'): - message = self.mail.Message(sender=sender, to=to, - subject=safe_str(subject), - body=safe_str(body), - charset=charset) - mailer = self.mail.Mailer(host=host, port=port, - user=user, password=password, - timeout=timeout, use_ssl=use_ssl, - use_tls=use_tls) - mailer.send(message, fail_silently=fail_silently) - - def read_configuration(self, env='CELERY_CONFIG_MODULE'): - try: - custom_config = os.environ[env] - except KeyError: - pass - else: - if custom_config: - usercfg = self._import_config_module(custom_config) - return DictAttribute(usercfg) - return {} - - def autodiscover_tasks(self, packages, related_name='tasks'): - self.task_modules.update( - mod.__name__ for mod in autodiscover_tasks(packages or (), - related_name) if mod) - - @property - def conf(self): - """Loader configuration.""" - if self._conf is None: - self._conf = self.read_configuration() - return self._conf - - @cached_property - def mail(self): - return self.import_module('celery.utils.mail') - - -def autodiscover_tasks(packages, related_name='tasks'): - global _RACE_PROTECTION - - if _RACE_PROTECTION: - return () - _RACE_PROTECTION = True - try: - return [find_related_module(pkg, related_name) for pkg in packages] - finally: - _RACE_PROTECTION = False - - -def find_related_module(package, related_name): - """Given a package name and a module name, tries to find that - module.""" - - # Django 1.7 allows for speciying a class name in INSTALLED_APPS. - # (Issue #2248). - try: - importlib.import_module(package) - except ImportError: - package, _, _ = package.rpartition('.') - - try: - pkg_path = importlib.import_module(package).__path__ - except AttributeError: - return - - try: - _imp.find_module(related_name, pkg_path) - except ImportError: - return - - return importlib.import_module('{0}.{1}'.format(package, related_name)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py deleted file mode 100644 index 6071480..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.loaders.default - ~~~~~~~~~~~~~~~~~~~~~~ - - The default loader used when no custom app has been initialized. - -""" -from __future__ import absolute_import - -import os -import warnings - -from celery.datastructures import DictAttribute -from celery.exceptions import NotConfigured -from celery.utils import strtobool - -from .base import BaseLoader - -__all__ = ['Loader', 'DEFAULT_CONFIG_MODULE'] - -DEFAULT_CONFIG_MODULE = 'celeryconfig' - -#: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set. 
-C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False)) - - -class Loader(BaseLoader): - """The loader used by the default app.""" - - def setup_settings(self, settingsdict): - return DictAttribute(settingsdict) - - def read_configuration(self, fail_silently=True): - """Read configuration from :file:`celeryconfig.py` and configure - celery and Django so it can be used by regular Python.""" - configname = os.environ.get('CELERY_CONFIG_MODULE', - DEFAULT_CONFIG_MODULE) - try: - usercfg = self._import_config_module(configname) - except ImportError: - if not fail_silently: - raise - # billiard sets this if forked using execv - if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): - warnings.warn(NotConfigured( - 'No {module} module found! Please make sure it exists and ' - 'is available to Python.'.format(module=configname))) - return self.setup_settings({}) - else: - self.configured = True - return self.setup_settings(usercfg) diff --git a/thesisenv/lib/python3.6/site-packages/celery/local.py b/thesisenv/lib/python3.6/site-packages/celery/local.py deleted file mode 100644 index 50da8bc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/local.py +++ /dev/null @@ -1,373 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.local - ~~~~~~~~~~~~ - - This module contains critical utilities that - needs to be loaded as soon as possible, and that - shall not load any third party modules. - - Parts of this module is Copyright by Werkzeug Team. - -""" -from __future__ import absolute_import - -import importlib -import sys - -from .five import string - -__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate'] - -__module__ = __name__ # used by Proxy class body - -PY3 = sys.version_info[0] == 3 - - -def _default_cls_attr(name, type_, cls_value): - # Proxy uses properties to forward the standard - # class attributes __module__, __name__ and __doc__ to the real - # object, but these needs to be a string when accessed from - # the Proxy class directly. This is a hack to make that work. - # -- See Issue #1087. - - def __new__(cls, getter): - instance = type_.__new__(cls, cls_value) - instance.__getter = getter - return instance - - def __get__(self, obj, cls=None): - return self.__getter(obj) if obj is not None else self - - return type(name, (type_, ), { - '__new__': __new__, '__get__': __get__, - }) - - -def try_import(module, default=None): - """Try to import and return module, or return - None if the module does not exist.""" - try: - return importlib.import_module(module) - except ImportError: - return default - - -class Proxy(object): - """Proxy to another object.""" - - # Code stolen from werkzeug.local.Proxy. 
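# Usage sketch for try_import() above: optional dependencies degrade to
# a default instead of raising ImportError at import time, which is how
# platforms.py (later in this patch) resolves setproctitle:
from celery.local import try_import

_setproctitle = try_import('setproctitle')   # -> module, or None if absent
if _setproctitle is not None:
    _setproctitle.setproctitle('example-worker-title')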
- __slots__ = ('__local', '__args', '__kwargs', '__dict__') - - def __init__(self, local, - args=None, kwargs=None, name=None, __doc__=None): - object.__setattr__(self, '_Proxy__local', local) - object.__setattr__(self, '_Proxy__args', args or ()) - object.__setattr__(self, '_Proxy__kwargs', kwargs or {}) - if name is not None: - object.__setattr__(self, '__custom_name__', name) - if __doc__ is not None: - object.__setattr__(self, '__doc__', __doc__) - - @_default_cls_attr('name', str, __name__) - def __name__(self): - try: - return self.__custom_name__ - except AttributeError: - return self._get_current_object().__name__ - - @_default_cls_attr('module', str, __module__) - def __module__(self): - return self._get_current_object().__module__ - - @_default_cls_attr('doc', str, __doc__) - def __doc__(self): - return self._get_current_object().__doc__ - - def _get_class(self): - return self._get_current_object().__class__ - - @property - def __class__(self): - return self._get_class() - - def _get_current_object(self): - """Return the current object. This is useful if you want the real - object behind the proxy at a time for performance reasons or because - you want to pass the object into a different context. - """ - loc = object.__getattribute__(self, '_Proxy__local') - if not hasattr(loc, '__release_local__'): - return loc(*self.__args, **self.__kwargs) - try: - return getattr(loc, self.__name__) - except AttributeError: - raise RuntimeError('no object bound to {0.__name__}'.format(self)) - - @property - def __dict__(self): - try: - return self._get_current_object().__dict__ - except RuntimeError: # pragma: no cover - raise AttributeError('__dict__') - - def __repr__(self): - try: - obj = self._get_current_object() - except RuntimeError: # pragma: no cover - return '<{0} unbound>'.format(self.__class__.__name__) - return repr(obj) - - def __bool__(self): - try: - return bool(self._get_current_object()) - except RuntimeError: # pragma: no cover - return False - __nonzero__ = __bool__ # Py2 - - def __unicode__(self): - try: - return string(self._get_current_object()) - except RuntimeError: # pragma: no cover - return repr(self) - - def __dir__(self): - try: - return dir(self._get_current_object()) - except RuntimeError: # pragma: no cover - return [] - - def __getattr__(self, name): - if name == '__members__': - return dir(self._get_current_object()) - return getattr(self._get_current_object(), name) - - def __setitem__(self, key, value): - self._get_current_object()[key] = value - - def __delitem__(self, key): - del self._get_current_object()[key] - - def __setslice__(self, i, j, seq): - self._get_current_object()[i:j] = seq - - def __delslice__(self, i, j): - del self._get_current_object()[i:j] - - def __setattr__(self, name, value): - setattr(self._get_current_object(), name, value) - - def __delattr__(self, name): - delattr(self._get_current_object(), name) - - def __str__(self): - return str(self._get_current_object()) - - def __lt__(self, other): - return self._get_current_object() < other - - def __le__(self, other): - return self._get_current_object() <= other - - def __eq__(self, other): - return self._get_current_object() == other - - def __ne__(self, other): - return self._get_current_object() != other - - def __gt__(self, other): - return self._get_current_object() > other - - def __ge__(self, other): - return self._get_current_object() >= other - - def __hash__(self): - return hash(self._get_current_object()) - - def __call__(self, *a, **kw): - return 
self._get_current_object()(*a, **kw) - - def __len__(self): - return len(self._get_current_object()) - - def __getitem__(self, i): - return self._get_current_object()[i] - - def __iter__(self): - return iter(self._get_current_object()) - - def __contains__(self, i): - return i in self._get_current_object() - - def __getslice__(self, i, j): - return self._get_current_object()[i:j] - - def __add__(self, other): - return self._get_current_object() + other - - def __sub__(self, other): - return self._get_current_object() - other - - def __mul__(self, other): - return self._get_current_object() * other - - def __floordiv__(self, other): - return self._get_current_object() // other - - def __mod__(self, other): - return self._get_current_object() % other - - def __divmod__(self, other): - return self._get_current_object().__divmod__(other) - - def __pow__(self, other): - return self._get_current_object() ** other - - def __lshift__(self, other): - return self._get_current_object() << other - - def __rshift__(self, other): - return self._get_current_object() >> other - - def __and__(self, other): - return self._get_current_object() & other - - def __xor__(self, other): - return self._get_current_object() ^ other - - def __or__(self, other): - return self._get_current_object() | other - - def __div__(self, other): - return self._get_current_object().__div__(other) - - def __truediv__(self, other): - return self._get_current_object().__truediv__(other) - - def __neg__(self): - return -(self._get_current_object()) - - def __pos__(self): - return +(self._get_current_object()) - - def __abs__(self): - return abs(self._get_current_object()) - - def __invert__(self): - return ~(self._get_current_object()) - - def __complex__(self): - return complex(self._get_current_object()) - - def __int__(self): - return int(self._get_current_object()) - - def __float__(self): - return float(self._get_current_object()) - - def __oct__(self): - return oct(self._get_current_object()) - - def __hex__(self): - return hex(self._get_current_object()) - - def __index__(self): - return self._get_current_object().__index__() - - def __coerce__(self, other): - return self._get_current_object().__coerce__(other) - - def __enter__(self): - return self._get_current_object().__enter__() - - def __exit__(self, *a, **kw): - return self._get_current_object().__exit__(*a, **kw) - - def __reduce__(self): - return self._get_current_object().__reduce__() - - if not PY3: - def __cmp__(self, other): - return cmp(self._get_current_object(), other) # noqa - - def __long__(self): - return long(self._get_current_object()) # noqa - - -class PromiseProxy(Proxy): - """This is a proxy to an object that has not yet been evaulated. - - :class:`Proxy` will evaluate the object each time, while the - promise will only evaluate it once. 
- - """ - - __slots__ = ('__pending__', ) - - def _get_current_object(self): - try: - return object.__getattribute__(self, '__thing') - except AttributeError: - return self.__evaluate__() - - def __then__(self, fun, *args, **kwargs): - if self.__evaluated__(): - return fun(*args, **kwargs) - from collections import deque - try: - pending = object.__getattribute__(self, '__pending__') - except AttributeError: - pending = None - if pending is None: - pending = deque() - object.__setattr__(self, '__pending__', pending) - pending.append((fun, args, kwargs)) - - def __evaluated__(self): - try: - object.__getattribute__(self, '__thing') - except AttributeError: - return False - return True - - def __maybe_evaluate__(self): - return self._get_current_object() - - def __evaluate__(self, - _clean=('_Proxy__local', - '_Proxy__args', - '_Proxy__kwargs')): - try: - thing = Proxy._get_current_object(self) - except: - raise - else: - object.__setattr__(self, '__thing', thing) - for attr in _clean: - try: - object.__delattr__(self, attr) - except AttributeError: # pragma: no cover - # May mask errors so ignore - pass - try: - pending = object.__getattribute__(self, '__pending__') - except AttributeError: - pass - else: - try: - while pending: - fun, args, kwargs = pending.popleft() - fun(*args, **kwargs) - finally: - try: - object.__delattr__(self, '__pending__') - except AttributeError: - pass - return thing - - -def maybe_evaluate(obj): - try: - return obj.__maybe_evaluate__() - except AttributeError: - return obj diff --git a/thesisenv/lib/python3.6/site-packages/celery/platforms.py b/thesisenv/lib/python3.6/site-packages/celery/platforms.py deleted file mode 100644 index b0242d5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/platforms.py +++ /dev/null @@ -1,813 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.platforms - ~~~~~~~~~~~~~~~~ - - Utilities dealing with platform specifics: signals, daemonization, - users, groups, and so on. 
- -""" -from __future__ import absolute_import, print_function - -import atexit -import errno -import math -import numbers -import os -import platform as _platform -import signal as _signal -import sys -import warnings - -from collections import namedtuple - -from billiard import current_process -# fileno used to be in this module -from kombu.utils import maybe_fileno -from kombu.utils.compat import get_errno -from kombu.utils.encoding import safe_str -from contextlib import contextmanager - -from .local import try_import -from .five import items, range, reraise, string_t, zip_longest -from .utils.functional import uniq - -_setproctitle = try_import('setproctitle') -resource = try_import('resource') -pwd = try_import('pwd') -grp = try_import('grp') -mputil = try_import('multiprocessing.util') - -__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', - 'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed', - 'get_fdmax', 'Pidfile', 'create_pidlock', - 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', - 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', - 'maybe_drop_privileges', 'signals', 'set_process_title', - 'set_mp_process_title', 'get_errno_name', 'ignore_errno', - 'fd_by_path'] - -# exitcodes -EX_OK = getattr(os, 'EX_OK', 0) -EX_FAILURE = 1 -EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) -EX_USAGE = getattr(os, 'EX_USAGE', 64) -EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) - -SYSTEM = _platform.system() -IS_OSX = SYSTEM == 'Darwin' -IS_WINDOWS = SYSTEM == 'Windows' - -DAEMON_WORKDIR = '/' - -PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY -PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK)) - -PIDLOCKED = """ERROR: Pidfile ({0}) already exists. -Seems we're already running? (pid: {1})""" - -_range = namedtuple('_range', ('start', 'stop')) - -C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False) - -ROOT_DISALLOWED = """\ -Running a worker with superuser privileges when the -worker accepts messages serialized with pickle is a very bad idea! - -If you really want to continue then you have to set the C_FORCE_ROOT -environment variable (but please think about this before you do). - -User information: uid={uid} euid={euid} gid={gid} egid={egid} -""" - -ROOT_DISCOURAGED = """\ -You are running the worker with superuser privileges, which is -absolutely not recommended! - -Please specify a different user using the -u option. - -User information: uid={uid} euid={euid} gid={gid} egid={egid} -""" - - -def pyimplementation(): - """Return string identifying the current Python implementation.""" - if hasattr(_platform, 'python_implementation'): - return _platform.python_implementation() - elif sys.platform.startswith('java'): - return 'Jython ' + sys.platform - elif hasattr(sys, 'pypy_version_info'): - v = '.'.join(str(p) for p in sys.pypy_version_info[:3]) - if sys.pypy_version_info[3:]: - v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:]) - return 'PyPy ' + v - else: - return 'CPython' - - -class LockFailed(Exception): - """Raised if a pidlock can't be acquired.""" - - -def get_fdmax(default=None): - """Return the maximum number of open file descriptors - on this system. - - :keyword default: Value returned if there's no file - descriptor limit. 
- - """ - try: - return os.sysconf('SC_OPEN_MAX') - except: - pass - if resource is None: # Windows - return default - fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if fdmax == resource.RLIM_INFINITY: - return default - return fdmax - - -class Pidfile(object): - """Pidfile - - This is the type returned by :func:`create_pidlock`. - - TIP: Use the :func:`create_pidlock` function instead, - which is more convenient and also removes stale pidfiles (when - the process holding the lock is no longer running). - - """ - - #: Path to the pid lock file. - path = None - - def __init__(self, path): - self.path = os.path.abspath(path) - - def acquire(self): - """Acquire lock.""" - try: - self.write_pid() - except OSError as exc: - reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) - return self - __enter__ = acquire - - def is_locked(self): - """Return true if the pid lock exists.""" - return os.path.exists(self.path) - - def release(self, *args): - """Release lock.""" - self.remove() - __exit__ = release - - def read_pid(self): - """Read and return the current pid.""" - with ignore_errno('ENOENT'): - with open(self.path, 'r') as fh: - line = fh.readline() - if line.strip() == line: # must contain '\n' - raise ValueError( - 'Partial or invalid pidfile {0.path}'.format(self)) - - try: - return int(line.strip()) - except ValueError: - raise ValueError( - 'pidfile {0.path} contents invalid.'.format(self)) - - def remove(self): - """Remove the lock.""" - with ignore_errno(errno.ENOENT, errno.EACCES): - os.unlink(self.path) - - def remove_if_stale(self): - """Remove the lock if the process is not running. - (does not respond to signals).""" - try: - pid = self.read_pid() - except ValueError as exc: - print('Broken pidfile found. Removing it.', file=sys.stderr) - self.remove() - return True - if not pid: - self.remove() - return True - - try: - os.kill(pid, 0) - except os.error as exc: - if exc.errno == errno.ESRCH: - print('Stale pidfile exists. Removing it.', file=sys.stderr) - self.remove() - return True - return False - - def write_pid(self): - pid = os.getpid() - content = '{0}\n'.format(pid) - - pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) - pidfile = os.fdopen(pidfile_fd, 'w') - try: - pidfile.write(content) - # flush and sync so that the re-read below works. - pidfile.flush() - try: - os.fsync(pidfile_fd) - except AttributeError: # pragma: no cover - pass - finally: - pidfile.close() - - rfh = open(self.path) - try: - if rfh.read() != content: - raise LockFailed( - "Inconsistency: Pidfile content doesn't match at re-read") - finally: - rfh.close() -PIDFile = Pidfile # compat alias - - -def create_pidlock(pidfile): - """Create and verify pidfile. - - If the pidfile already exists the program exits with an error message, - however if the process it refers to is not running anymore, the pidfile - is deleted and the program continues. - - This function will automatically install an :mod:`atexit` handler - to release the lock at exit, you can skip this by calling - :func:`_create_pidlock` instead. - - :returns: :class:`Pidfile`. - - **Example**: - - .. 
code-block:: python - - pidlock = create_pidlock('/var/run/app.pid') - - """ - pidlock = _create_pidlock(pidfile) - atexit.register(pidlock.release) - return pidlock - - -def _create_pidlock(pidfile): - pidlock = Pidfile(pidfile) - if pidlock.is_locked() and not pidlock.remove_if_stale(): - print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) - raise SystemExit(EX_CANTCREAT) - pidlock.acquire() - return pidlock - - -def fd_by_path(paths): - """Return a list of fds. - - This method returns list of fds corresponding to - file paths passed in paths variable. - - :keyword paths: List of file paths go get fd for. - - :returns: :list:. - - **Example**: - - .. code-block:: python - - keep = fd_by_path(['/dev/urandom', - '/my/precious/']) - """ - stats = set() - for path in paths: - try: - fd = os.open(path, os.O_RDONLY) - except OSError: - continue - try: - stats.add(os.fstat(fd)[1:3]) - finally: - os.close(fd) - - def fd_in_stats(fd): - try: - return os.fstat(fd)[1:3] in stats - except OSError: - return False - - return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)] - - -if hasattr(os, 'closerange'): - - def close_open_fds(keep=None): - # must make sure this is 0-inclusive (Issue #1882) - keep = list(uniq(sorted( - f for f in map(maybe_fileno, keep or []) if f is not None - ))) - maxfd = get_fdmax(default=2048) - kL, kH = iter([-1] + keep), iter(keep + [maxfd]) - for low, high in zip_longest(kL, kH): - if low + 1 != high: - os.closerange(low + 1, high) - -else: - - def close_open_fds(keep=None): # noqa - keep = [maybe_fileno(f) - for f in (keep or []) if maybe_fileno(f) is not None] - for fd in reversed(range(get_fdmax(default=2048))): - if fd not in keep: - with ignore_errno(errno.EBADF): - os.close(fd) - - -class DaemonContext(object): - _is_open = False - - def __init__(self, pidfile=None, workdir=None, umask=None, - fake=False, after_chdir=None, after_forkers=True, - **kwargs): - if isinstance(umask, string_t): - # octal or decimal, depending on initial zero. - umask = int(umask, 8 if umask.startswith('0') else 10) - self.workdir = workdir or DAEMON_WORKDIR - self.umask = umask - self.fake = fake - self.after_chdir = after_chdir - self.after_forkers = after_forkers - self.stdfds = (sys.stdin, sys.stdout, sys.stderr) - - def redirect_to_null(self, fd): - if fd is not None: - dest = os.open(os.devnull, os.O_RDWR) - os.dup2(dest, fd) - - def open(self): - if not self._is_open: - if not self.fake: - self._detach() - - os.chdir(self.workdir) - if self.umask is not None: - os.umask(self.umask) - - if self.after_chdir: - self.after_chdir() - - if not self.fake: - # We need to keep /dev/urandom from closing because - # shelve needs it, and Beat needs shelve to start. - keep = list(self.stdfds) + fd_by_path(['/dev/urandom']) - close_open_fds(keep) - for fd in self.stdfds: - self.redirect_to_null(maybe_fileno(fd)) - if self.after_forkers and mputil is not None: - mputil._run_after_forkers() - - self._is_open = True - __enter__ = open - - def close(self, *args): - if self._is_open: - self._is_open = False - __exit__ = close - - def _detach(self): - if os.fork() == 0: # first child - os.setsid() # create new session - if os.fork() > 0: # second child - os._exit(0) - else: - os._exit(0) - return self - - -def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, - workdir=None, fake=False, **opts): - """Detach the current process in the background (daemonize). - - :keyword logfile: Optional log file. 
The ability to write to this file - will be verified before the process is detached. - :keyword pidfile: Optional pidfile. The pidfile will not be created, - as this is the responsibility of the child. But the process will - exit if the pid lock exists and the pid written is still running. - :keyword uid: Optional user id or user name to change - effective privileges to. - :keyword gid: Optional group id or group name to change effective - privileges to. - :keyword umask: Optional umask that will be effective in the child process. - :keyword workdir: Optional new working directory. - :keyword fake: Don't actually detach, intented for debugging purposes. - :keyword \*\*opts: Ignored. - - **Example**: - - .. code-block:: python - - from celery.platforms import detached, create_pidlock - - with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid', - uid='nobody'): - # Now in detached child process with effective user set to nobody, - # and we know that our logfile can be written to, and that - # the pidfile is not locked. - pidlock = create_pidlock('/var/run/app.pid') - - # Run the program - program.run(logfile='/var/log/app.log') - - """ - - if not resource: - raise RuntimeError('This platform does not support detach.') - workdir = os.getcwd() if workdir is None else workdir - - signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler. - maybe_drop_privileges(uid=uid, gid=gid) - - def after_chdir_do(): - # Since without stderr any errors will be silently suppressed, - # we need to know that we have access to the logfile. - logfile and open(logfile, 'a').close() - # Doesn't actually create the pidfile, but makes sure it's not stale. - if pidfile: - _create_pidlock(pidfile).release() - - return DaemonContext( - umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do, - ) - - -def parse_uid(uid): - """Parse user id. - - uid can be an integer (uid) or a string (user name), if a user name - the uid is taken from the system user registry. - - """ - try: - return int(uid) - except ValueError: - try: - return pwd.getpwnam(uid).pw_uid - except (AttributeError, KeyError): - raise KeyError('User does not exist: {0}'.format(uid)) - - -def parse_gid(gid): - """Parse group id. - - gid can be an integer (gid) or a string (group name), if a group name - the gid is taken from the system group registry. - - """ - try: - return int(gid) - except ValueError: - try: - return grp.getgrnam(gid).gr_gid - except (AttributeError, KeyError): - raise KeyError('Group does not exist: {0}'.format(gid)) - - -def _setgroups_hack(groups): - """:fun:`setgroups` may have a platform-dependent limit, - and it is not always possible to know in advance what this limit - is, so we use this ugly hack stolen from glibc.""" - groups = groups[:] - - while 1: - try: - return os.setgroups(groups) - except ValueError: # error from Python's check. - if len(groups) <= 1: - raise - groups[:] = groups[:-1] - except OSError as exc: # error from the OS. - if exc.errno != errno.EINVAL or len(groups) <= 1: - raise - groups[:] = groups[:-1] - - -def setgroups(groups): - """Set active groups from a list of group ids.""" - max_groups = None - try: - max_groups = os.sysconf('SC_NGROUPS_MAX') - except Exception: - pass - try: - return _setgroups_hack(groups[:max_groups]) - except OSError as exc: - if exc.errno != errno.EPERM: - raise - if any(group not in groups for group in os.getgroups()): - # we shouldn't be allowed to change to this group. 
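# Sketch of the gap-walking used by close_open_fds() earlier in this
# file: pairing the sorted keep-list against a shifted copy yields
# exactly the fd ranges between kept descriptors (helper name is
# illustrative, not part of the module):
from itertools import zip_longest   # celery.five.zip_longest on Py2

def closed_ranges(keep, maxfd):
    keep = sorted(set(keep))
    lows, highs = iter([-1] + keep), iter(keep + [maxfd])
    return [(low + 1, high) for low, high in zip_longest(lows, highs)
            if low + 1 != high]

# Keeping fds 3 and 5 under a limit of 10 closes [0,3), [4,5), [6,10):
assert closed_ranges([3, 5], 10) == [(0, 3), (4, 5), (6, 10)]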
- raise - - -def initgroups(uid, gid): - """Compat version of :func:`os.initgroups` which was first - added to Python 2.7.""" - if not pwd: # pragma: no cover - return - username = pwd.getpwuid(uid)[0] - if hasattr(os, 'initgroups'): # Python 2.7+ - return os.initgroups(username, gid) - groups = [gr.gr_gid for gr in grp.getgrall() - if username in gr.gr_mem] - setgroups(groups) - - -def setgid(gid): - """Version of :func:`os.setgid` supporting group names.""" - os.setgid(parse_gid(gid)) - - -def setuid(uid): - """Version of :func:`os.setuid` supporting usernames.""" - os.setuid(parse_uid(uid)) - - -def maybe_drop_privileges(uid=None, gid=None): - """Change process privileges to new user/group. - - If UID and GID is specified, the real user/group is changed. - - If only UID is specified, the real user is changed, and the group is - changed to the users primary group. - - If only GID is specified, only the group is changed. - - """ - if sys.platform == 'win32': - return - if os.geteuid(): - # no point trying to setuid unless we're root. - if not os.getuid(): - raise AssertionError('contact support') - uid = uid and parse_uid(uid) - gid = gid and parse_gid(gid) - - if uid: - # If GID isn't defined, get the primary GID of the user. - if not gid and pwd: - gid = pwd.getpwuid(uid).pw_gid - # Must set the GID before initgroups(), as setgid() - # is known to zap the group list on some platforms. - - # setgid must happen before setuid (otherwise the setgid operation - # may fail because of insufficient privileges and possibly stay - # in a privileged group). - setgid(gid) - initgroups(uid, gid) - - # at last: - setuid(uid) - # ... and make sure privileges cannot be restored: - try: - setuid(0) - except OSError as exc: - if get_errno(exc) != errno.EPERM: - raise - pass # Good: cannot restore privileges. - else: - raise RuntimeError( - 'non-root user able to restore privileges after setuid.') - else: - gid and setgid(gid) - - if uid and (not os.getuid()) and not (os.geteuid()): - raise AssertionError('Still root uid after drop privileges!') - if gid and (not os.getgid()) and not (os.getegid()): - raise AssertionError('Still root gid after drop privileges!') - - -class Signals(object): - """Convenience interface to :mod:`signals`. - - If the requested signal is not supported on the current platform, - the operation will be ignored. - - **Examples**: - - .. code-block:: python - - >>> from celery.platforms import signals - - >>> from proj.handlers import my_handler - >>> signals['INT'] = my_handler - - >>> signals['INT'] - my_handler - - >>> signals.supported('INT') - True - - >>> signals.signum('INT') - 2 - - >>> signals.ignore('USR1') - >>> signals['USR1'] == signals.ignored - True - - >>> signals.reset('USR1') - >>> signals['USR1'] == signals.default - True - - >>> from proj.handlers import exit_handler, hup_handler - >>> signals.update(INT=exit_handler, - ... TERM=exit_handler, - ... 
HUP=hup_handler) - - """ - - ignored = _signal.SIG_IGN - default = _signal.SIG_DFL - - if hasattr(_signal, 'setitimer'): - - def arm_alarm(self, seconds): - _signal.setitimer(_signal.ITIMER_REAL, seconds) - else: # pragma: no cover - try: - from itimer import alarm as _itimer_alarm # noqa - except ImportError: - - def arm_alarm(self, seconds): # noqa - _signal.alarm(math.ceil(seconds)) - else: # pragma: no cover - - def arm_alarm(self, seconds): # noqa - return _itimer_alarm(seconds) # noqa - - def reset_alarm(self): - return _signal.alarm(0) - - def supported(self, signal_name): - """Return true value if ``signal_name`` exists on this platform.""" - try: - return self.signum(signal_name) - except AttributeError: - pass - - def signum(self, signal_name): - """Get signal number from signal name.""" - if isinstance(signal_name, numbers.Integral): - return signal_name - if not isinstance(signal_name, string_t) \ - or not signal_name.isupper(): - raise TypeError('signal name must be uppercase string.') - if not signal_name.startswith('SIG'): - signal_name = 'SIG' + signal_name - return getattr(_signal, signal_name) - - def reset(self, *signal_names): - """Reset signals to the default signal handler. - - Does nothing if the platform doesn't support signals, - or the specified signal in particular. - - """ - self.update((sig, self.default) for sig in signal_names) - - def ignore(self, *signal_names): - """Ignore signal using :const:`SIG_IGN`. - - Does nothing if the platform doesn't support signals, - or the specified signal in particular. - - """ - self.update((sig, self.ignored) for sig in signal_names) - - def __getitem__(self, signal_name): - return _signal.getsignal(self.signum(signal_name)) - - def __setitem__(self, signal_name, handler): - """Install signal handler. - - Does nothing if the current platform doesn't support signals, - or the specified signal in particular. - - """ - try: - _signal.signal(self.signum(signal_name), handler) - except (AttributeError, ValueError): - pass - - def update(self, _d_=None, **sigmap): - """Set signal handlers from a mapping.""" - for signal_name, handler in items(dict(_d_ or {}, **sigmap)): - self[signal_name] = handler - -signals = Signals() -get_signal = signals.signum # compat -install_signal_handler = signals.__setitem__ # compat -reset_signal = signals.reset # compat -ignore_signal = signals.ignore # compat - - -def strargv(argv): - arg_start = 2 if 'manage' in argv[0] else 1 - if len(argv) > arg_start: - return ' '.join(argv[arg_start:]) - return '' - - -def set_process_title(progname, info=None): - """Set the ps name for the currently running process. - - Only works if :mod:`setproctitle` is installed. - - """ - proctitle = '[{0}]'.format(progname) - proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle - if _setproctitle: - _setproctitle.setproctitle(safe_str(proctitle)) - return proctitle - - -if os.environ.get('NOSETPS'): # pragma: no cover - - def set_mp_process_title(*a, **k): - pass -else: - - def set_mp_process_title(progname, info=None, hostname=None): # noqa - """Set the ps name using the multiprocessing process name. - - Only works if :mod:`setproctitle` is installed. - - """ - if hostname: - progname = '{0}: {1}'.format(progname, hostname) - return set_process_title( - '{0}:{1}'.format(progname, current_process().name), info=info) - - -def get_errno_name(n): - """Get errno for string, e.g. 
``ENOENT``.""" - if isinstance(n, string_t): - return getattr(errno, n) - return n - - -@contextmanager -def ignore_errno(*errnos, **kwargs): - """Context manager to ignore specific POSIX error codes. - - Takes a list of error codes to ignore, which can be either - the name of the code, or the code integer itself:: - - >>> with ignore_errno('ENOENT'): - ... with open('foo', 'r') as fh: - ... return fh.read() - - >>> with ignore_errno(errno.ENOENT, errno.EPERM): - ... pass - - :keyword types: A tuple of exceptions to ignore (when the errno matches), - defaults to :exc:`Exception`. - """ - types = kwargs.get('types') or (Exception, ) - errnos = [get_errno_name(errno) for errno in errnos] - try: - yield - except types as exc: - if not hasattr(exc, 'errno'): - raise - if exc.errno not in errnos: - raise - - -def check_privileges(accept_content): - uid = os.getuid() if hasattr(os, 'getuid') else 65535 - gid = os.getgid() if hasattr(os, 'getgid') else 65535 - euid = os.geteuid() if hasattr(os, 'geteuid') else 65535 - egid = os.getegid() if hasattr(os, 'getegid') else 65535 - - if hasattr(os, 'fchown'): - if not all(hasattr(os, attr) - for attr in ['getuid', 'getgid', 'geteuid', 'getegid']): - raise AssertionError('suspicious platform, contact support') - - if not uid or not gid or not euid or not egid: - if ('pickle' in accept_content or - 'application/x-python-serialize' in accept_content): - if not C_FORCE_ROOT: - try: - print(ROOT_DISALLOWED.format( - uid=uid, euid=euid, gid=gid, egid=egid, - ), file=sys.stderr) - finally: - os._exit(1) - warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format( - uid=uid, euid=euid, gid=gid, egid=egid, - ))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/result.py b/thesisenv/lib/python3.6/site-packages/celery/result.py deleted file mode 100644 index bf49d72..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/result.py +++ /dev/null @@ -1,925 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.result - ~~~~~~~~~~~~~ - - Task results/state and groups of results. - -""" -from __future__ import absolute_import - -import time -import warnings - -from collections import deque -from contextlib import contextmanager -from copy import copy - -from kombu.utils import cached_property -from kombu.utils.compat import OrderedDict - -from . import current_app -from . import states -from ._state import _set_task_join_will_block, task_join_will_block -from .app import app_or_default -from .datastructures import DependencyGraph, GraphFormatter -from .exceptions import IncompleteStream, TimeoutError -from .five import items, range, string_t, monotonic -from .utils import deprecated - -__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', - 'EagerResult', 'result_from_tuple'] - -E_WOULDBLOCK = """\ -Never call result.get() within a task! -See http://docs.celeryq.org/en/latest/userguide/tasks.html\ -#task-synchronous-subtasks - -In Celery 3.2 this will result in an exception being -raised instead of just being a warning. -""" - - -def assert_will_not_block(): - if task_join_will_block(): - warnings.warn(RuntimeWarning(E_WOULDBLOCK)) - - -@contextmanager -def allow_join_result(): - reset_value = task_join_will_block() - _set_task_join_will_block(False) - try: - yield - finally: - _set_task_join_will_block(reset_value) - - -class ResultBase(object): - """Base class for all results""" - - #: Parent result (if part of a chain) - parent = None - - -class AsyncResult(ResultBase): - """Query task state. - - :param id: see :attr:`id`. 
- :keyword backend: see :attr:`backend`. - - """ - app = None - - #: Error raised for timeouts. - TimeoutError = TimeoutError - - #: The task's UUID. - id = None - - #: The task result backend to use. - backend = None - - def __init__(self, id, backend=None, task_name=None, - app=None, parent=None): - self.app = app_or_default(app or self.app) - self.id = id - self.backend = backend or self.app.backend - self.task_name = task_name - self.parent = parent - self._cache = None - - def as_tuple(self): - parent = self.parent - return (self.id, parent and parent.as_tuple()), None - serializable = as_tuple # XXX compat - - def forget(self): - """Forget about (and possibly remove the result of) this task.""" - self._cache = None - self.backend.forget(self.id) - - def revoke(self, connection=None, terminate=False, signal=None, - wait=False, timeout=None): - """Send revoke signal to all workers. - - Any worker receiving the task, or having reserved the - task, *must* ignore it. - - :keyword terminate: Also terminate the process currently working - on the task (if any). - :keyword signal: Name of signal to send to process if terminate. - Default is TERM. - :keyword wait: Wait for replies from workers. Will wait for 1 second - by default or you can specify a custom ``timeout``. - :keyword timeout: Time in seconds to wait for replies if ``wait`` - enabled. - - """ - self.app.control.revoke(self.id, connection=connection, - terminate=terminate, signal=signal, - reply=wait, timeout=timeout) - - def get(self, timeout=None, propagate=True, interval=0.5, - no_ack=True, follow_parents=True, - EXCEPTION_STATES=states.EXCEPTION_STATES, - PROPAGATE_STATES=states.PROPAGATE_STATES): - """Wait until task is ready, and return its result. - - .. warning:: - - Waiting for tasks within a task may lead to deadlocks. - Please read :ref:`task-synchronous-subtasks`. - - :keyword timeout: How long to wait, in seconds, before the - operation times out. - :keyword propagate: Re-raise exception if the task failed. - :keyword interval: Time to wait (in seconds) before retrying to - retrieve the result. Note that this does not have any effect - when using the amqp result store backend, as it does not - use polling. - :keyword no_ack: Enable amqp no ack (automatically acknowledge - message). If this is :const:`False` then the message will - **not be acked**. - :keyword follow_parents: Reraise any exception raised by parent task. - - :raises celery.exceptions.TimeoutError: if `timeout` is not - :const:`None` and the result does not arrive within `timeout` - seconds. - - If the remote call raised an exception then that exception will - be re-raised. - - """ - assert_will_not_block() - on_interval = None - if follow_parents and propagate and self.parent: - on_interval = self._maybe_reraise_parent_error - on_interval() - - if self._cache: - if propagate: - self.maybe_reraise() - return self.result - - meta = self.backend.wait_for( - self.id, timeout=timeout, - interval=interval, - on_interval=on_interval, - no_ack=no_ack, - ) - if meta: - self._maybe_set_cache(meta) - status = meta['status'] - if status in PROPAGATE_STATES and propagate: - raise meta['result'] - return meta['result'] - wait = get # deprecated alias to :meth:`get`. 
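# Assumed usage of the get() API above (the 'proj' app name and the
# task id are hypothetical):
from proj.celery import app

result = app.AsyncResult('d9078da5-9915-40a0-bfa1-392c7bde42ed')
if result.ready():                    # never block inside another task
    value = result.get(timeout=10, propagate=False)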
- - def _maybe_reraise_parent_error(self): - for node in reversed(list(self._parents())): - node.maybe_reraise() - - def _parents(self): - node = self.parent - while node: - yield node - node = node.parent - - def collect(self, intermediate=False, **kwargs): - """Iterator, like :meth:`get` will wait for the task to complete, - but will also follow :class:`AsyncResult` and :class:`ResultSet` - returned by the task, yielding ``(result, value)`` tuples for each - result in the tree. - - An example would be having the following tasks: - - .. code-block:: python - - from celery import group - from proj.celery import app - - @app.task(trail=True) - def A(how_many): - return group(B.s(i) for i in range(how_many))() - - @app.task(trail=True) - def B(i): - return pow2.delay(i) - - @app.task(trail=True) - def pow2(i): - return i ** 2 - - Note that the ``trail`` option must be enabled - so that the list of children is stored in ``result.children``. - This is the default but enabled explicitly for illustration. - - Calling :meth:`collect` would return: - - .. code-block:: python - - >>> from celery.result import ResultBase - >>> from proj.tasks import A - - >>> result = A.delay(10) - >>> [v for v in result.collect() - ... if not isinstance(v, (ResultBase, tuple))] - [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] - - """ - for _, R in self.iterdeps(intermediate=intermediate): - yield R, R.get(**kwargs) - - def get_leaf(self): - value = None - for _, R in self.iterdeps(): - value = R.get() - return value - - def iterdeps(self, intermediate=False): - stack = deque([(None, self)]) - - while stack: - parent, node = stack.popleft() - yield parent, node - if node.ready(): - stack.extend((node, child) for child in node.children or []) - else: - if not intermediate: - raise IncompleteStream() - - def ready(self): - """Returns :const:`True` if the task has been executed. - - If the task is still running, pending, or is waiting - for retry then :const:`False` is returned. 
- - """ - return self.state in self.backend.READY_STATES - - def successful(self): - """Returns :const:`True` if the task executed successfully.""" - return self.state == states.SUCCESS - - def failed(self): - """Returns :const:`True` if the task failed.""" - return self.state == states.FAILURE - - def maybe_reraise(self): - if self.state in states.PROPAGATE_STATES: - raise self.result - - def build_graph(self, intermediate=False, formatter=None): - graph = DependencyGraph( - formatter=formatter or GraphFormatter(root=self.id, shape='oval'), - ) - for parent, node in self.iterdeps(intermediate=intermediate): - graph.add_arc(node) - if parent: - graph.add_edge(parent, node) - return graph - - def __str__(self): - """`str(self) -> self.id`""" - return str(self.id) - - def __hash__(self): - """`hash(self) -> hash(self.id)`""" - return hash(self.id) - - def __repr__(self): - return '<{0}: {1}>'.format(type(self).__name__, self.id) - - def __eq__(self, other): - if isinstance(other, AsyncResult): - return other.id == self.id - elif isinstance(other, string_t): - return other == self.id - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __copy__(self): - return self.__class__( - self.id, self.backend, self.task_name, self.app, self.parent, - ) - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return self.id, self.backend, self.task_name, None, self.parent - - def __del__(self): - self._cache = None - - @cached_property - def graph(self): - return self.build_graph() - - @property - def supports_native_join(self): - return self.backend.supports_native_join - - @property - def children(self): - return self._get_task_meta().get('children') - - def _maybe_set_cache(self, meta): - if meta: - state = meta['status'] - if state == states.SUCCESS or state in states.PROPAGATE_STATES: - return self._set_cache(meta) - return meta - - def _get_task_meta(self): - if self._cache is None: - return self._maybe_set_cache(self.backend.get_task_meta(self.id)) - return self._cache - - def _set_cache(self, d): - children = d.get('children') - if children: - d['children'] = [ - result_from_tuple(child, self.app) for child in children - ] - self._cache = d - return d - - @property - def result(self): - """When the task has been executed, this contains the return value. - If the task raised an exception, this will be the exception - instance.""" - return self._get_task_meta()['result'] - info = result - - @property - def traceback(self): - """Get the traceback of a failed task.""" - return self._get_task_meta().get('traceback') - - @property - def state(self): - """The tasks current state. - - Possible values includes: - - *PENDING* - - The task is waiting for execution. - - *STARTED* - - The task has been started. - - *RETRY* - - The task is to be retried, possibly because of failure. - - *FAILURE* - - The task raised an exception, or has exceeded the retry limit. - The :attr:`result` attribute then contains the - exception raised by the task. - - *SUCCESS* - - The task executed successfully. The :attr:`result` attribute - then contains the tasks return value. - - """ - return self._get_task_meta()['status'] - status = state - - @property - def task_id(self): - """compat alias to :attr:`id`""" - return self.id - - @task_id.setter # noqa - def task_id(self, id): - self.id = id -BaseAsyncResult = AsyncResult # for backwards compatibility. - - -class ResultSet(ResultBase): - """Working with more than one result. 
- - :param results: List of result instances. - - """ - app = None - - #: List of results in in the set. - results = None - - def __init__(self, results, app=None, **kwargs): - self.app = app_or_default(app or self.app) - self.results = results - - def add(self, result): - """Add :class:`AsyncResult` as a new member of the set. - - Does nothing if the result is already a member. - - """ - if result not in self.results: - self.results.append(result) - - def remove(self, result): - """Remove result from the set; it must be a member. - - :raises KeyError: if the result is not a member. - - """ - if isinstance(result, string_t): - result = self.app.AsyncResult(result) - try: - self.results.remove(result) - except ValueError: - raise KeyError(result) - - def discard(self, result): - """Remove result from the set if it is a member. - - If it is not a member, do nothing. - - """ - try: - self.remove(result) - except KeyError: - pass - - def update(self, results): - """Update set with the union of itself and an iterable with - results.""" - self.results.extend(r for r in results if r not in self.results) - - def clear(self): - """Remove all results from this set.""" - self.results[:] = [] # don't create new list. - - def successful(self): - """Was all of the tasks successful? - - :returns: :const:`True` if all of the tasks finished - successfully (i.e. did not raise an exception). - - """ - return all(result.successful() for result in self.results) - - def failed(self): - """Did any of the tasks fail? - - :returns: :const:`True` if one of the tasks failed. - (i.e., raised an exception) - - """ - return any(result.failed() for result in self.results) - - def maybe_reraise(self): - for result in self.results: - result.maybe_reraise() - - def waiting(self): - """Are any of the tasks incomplete? - - :returns: :const:`True` if one of the tasks are still - waiting for execution. - - """ - return any(not result.ready() for result in self.results) - - def ready(self): - """Did all of the tasks complete? (either by success of failure). - - :returns: :const:`True` if all of the tasks has been - executed. - - """ - return all(result.ready() for result in self.results) - - def completed_count(self): - """Task completion count. - - :returns: the number of tasks completed. - - """ - return sum(int(result.successful()) for result in self.results) - - def forget(self): - """Forget about (and possible remove the result of) all the tasks.""" - for result in self.results: - result.forget() - - def revoke(self, connection=None, terminate=False, signal=None, - wait=False, timeout=None): - """Send revoke signal to all workers for all tasks in the set. - - :keyword terminate: Also terminate the process currently working - on the task (if any). - :keyword signal: Name of signal to send to process if terminate. - Default is TERM. - :keyword wait: Wait for replies from worker. Will wait for 1 second - by default or you can specify a custom ``timeout``. - :keyword timeout: Time in seconds to wait for replies if ``wait`` - enabled. 
- - """ - self.app.control.revoke([r.id for r in self.results], - connection=connection, timeout=timeout, - terminate=terminate, signal=signal, reply=wait) - - def __iter__(self): - return iter(self.results) - - def __getitem__(self, index): - """`res[i] -> res.results[i]`""" - return self.results[index] - - @deprecated('3.2', '3.3') - def iterate(self, timeout=None, propagate=True, interval=0.5): - """Deprecated method, use :meth:`get` with a callback argument.""" - elapsed = 0.0 - results = OrderedDict((result.id, copy(result)) - for result in self.results) - - while results: - removed = set() - for task_id, result in items(results): - if result.ready(): - yield result.get(timeout=timeout and timeout - elapsed, - propagate=propagate) - removed.add(task_id) - else: - if result.backend.subpolling_interval: - time.sleep(result.backend.subpolling_interval) - for task_id in removed: - results.pop(task_id, None) - time.sleep(interval) - elapsed += interval - if timeout and elapsed >= timeout: - raise TimeoutError('The operation timed out') - - def get(self, timeout=None, propagate=True, interval=0.5, - callback=None, no_ack=True): - """See :meth:`join` - - This is here for API compatibility with :class:`AsyncResult`, - in addition it uses :meth:`join_native` if available for the - current result backend. - - """ - return (self.join_native if self.supports_native_join else self.join)( - timeout=timeout, propagate=propagate, - interval=interval, callback=callback, no_ack=no_ack) - - def join(self, timeout=None, propagate=True, interval=0.5, - callback=None, no_ack=True): - """Gathers the results of all tasks as a list in order. - - .. note:: - - This can be an expensive operation for result store - backends that must resort to polling (e.g. database). - - You should consider using :meth:`join_native` if your backend - supports it. - - .. warning:: - - Waiting for tasks within a task may lead to deadlocks. - Please see :ref:`task-synchronous-subtasks`. - - :keyword timeout: The number of seconds to wait for results before - the operation times out. - - :keyword propagate: If any of the tasks raises an exception, the - exception will be re-raised. - - :keyword interval: Time to wait (in seconds) before retrying to - retrieve a result from the set. Note that this - does not have any effect when using the amqp - result store backend, as it does not use polling. - - :keyword callback: Optional callback to be called for every result - received. Must have signature ``(task_id, value)`` - No results will be returned by this function if - a callback is specified. The order of results - is also arbitrary when a callback is used. - To get access to the result object for a particular - id you will have to generate an index first: - ``index = {r.id: r for r in gres.results.values()}`` - Or you can create new result objects on the fly: - ``result = app.AsyncResult(task_id)`` (both will - take advantage of the backend cache anyway). - - :keyword no_ack: Automatic message acknowledgement (Note that if this - is set to :const:`False` then the messages *will not be - acknowledged*). - - :raises celery.exceptions.TimeoutError: if ``timeout`` is not - :const:`None` and the operation takes longer than ``timeout`` - seconds. 
- - """ - assert_will_not_block() - time_start = monotonic() - remaining = None - - results = [] - for result in self.results: - remaining = None - if timeout: - remaining = timeout - (monotonic() - time_start) - if remaining <= 0.0: - raise TimeoutError('join operation timed out') - value = result.get( - timeout=remaining, propagate=propagate, - interval=interval, no_ack=no_ack, - ) - if callback: - callback(result.id, value) - else: - results.append(value) - return results - - def iter_native(self, timeout=None, interval=0.5, no_ack=True): - """Backend optimized version of :meth:`iterate`. - - .. versionadded:: 2.2 - - Note that this does not support collecting the results - for different task types using different backends. - - This is currently only supported by the amqp, Redis and cache - result backends. - - """ - results = self.results - if not results: - return iter([]) - return self.backend.get_many( - set(r.id for r in results), - timeout=timeout, interval=interval, no_ack=no_ack, - ) - - def join_native(self, timeout=None, propagate=True, - interval=0.5, callback=None, no_ack=True): - """Backend optimized version of :meth:`join`. - - .. versionadded:: 2.2 - - Note that this does not support collecting the results - for different task types using different backends. - - This is currently only supported by the amqp, Redis and cache - result backends. - - """ - assert_will_not_block() - order_index = None if callback else dict( - (result.id, i) for i, result in enumerate(self.results) - ) - acc = None if callback else [None for _ in range(len(self))] - for task_id, meta in self.iter_native(timeout, interval, no_ack): - value = meta['result'] - if propagate and meta['status'] in states.PROPAGATE_STATES: - raise value - if callback: - callback(task_id, value) - else: - acc[order_index[task_id]] = value - return acc - - def _failed_join_report(self): - return (res for res in self.results - if res.backend.is_cached(res.id) and - res.state in states.PROPAGATE_STATES) - - def __len__(self): - return len(self.results) - - def __eq__(self, other): - if isinstance(other, ResultSet): - return other.results == self.results - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<{0}: [{1}]>'.format(type(self).__name__, - ', '.join(r.id for r in self.results)) - - @property - def subtasks(self): - """Deprecated alias to :attr:`results`.""" - return self.results - - @property - def supports_native_join(self): - try: - return self.results[0].supports_native_join - except IndexError: - pass - - @property - def backend(self): - return self.app.backend if self.app else self.results[0].backend - - -class GroupResult(ResultSet): - """Like :class:`ResultSet`, but with an associated id. - - This type is returned by :class:`~celery.group`, and the - deprecated TaskSet, meth:`~celery.task.TaskSet.apply_async` method. - - It enables inspection of the tasks state and return values as - a single entity. - - :param id: The id of the group. - :param results: List of result instances. - - """ - - #: The UUID of the group. - id = None - - #: List/iterator of results in the group - results = None - - def __init__(self, id=None, results=None, **kwargs): - self.id = id - ResultSet.__init__(self, results, **kwargs) - - def save(self, backend=None): - """Save group-result for later retrieval using :meth:`restore`. - - Example:: - - >>> def save_and_restore(result): - ... result.save() - ... 
result = GroupResult.restore(result.id) - - """ - return (backend or self.app.backend).save_group(self.id, self) - - def delete(self, backend=None): - """Remove this result if it was previously saved.""" - (backend or self.app.backend).delete_group(self.id) - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return self.id, self.results - - def __bool__(self): - return bool(self.id or self.results) - __nonzero__ = __bool__ # Included for Py2 backwards compatibility - - def __eq__(self, other): - if isinstance(other, GroupResult): - return other.id == self.id and other.results == self.results - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id, - ', '.join(r.id for r in self.results)) - - def as_tuple(self): - return self.id, [r.as_tuple() for r in self.results] - serializable = as_tuple # XXX compat - - @property - def children(self): - return self.results - - @classmethod - def restore(self, id, backend=None): - """Restore previously saved group result.""" - return ( - backend or (self.app.backend if self.app else current_app.backend) - ).restore_group(id) - - -class TaskSetResult(GroupResult): - """Deprecated version of :class:`GroupResult`""" - - def __init__(self, taskset_id, results=None, **kwargs): - # XXX supports the taskset_id kwarg. - # XXX previously the "results" arg was named "subtasks". - if 'subtasks' in kwargs: - results = kwargs['subtasks'] - GroupResult.__init__(self, taskset_id, results, **kwargs) - - def itersubtasks(self): - """Deprecated. Use ``iter(self.results)`` instead.""" - return iter(self.results) - - @property - def total(self): - """Deprecated: Use ``len(r)``.""" - return len(self) - - @property - def taskset_id(self): - """compat alias to :attr:`self.id`""" - return self.id - - @taskset_id.setter # noqa - def taskset_id(self, id): - self.id = id - - -class EagerResult(AsyncResult): - """Result that we know has already been executed.""" - task_name = None - - def __init__(self, id, ret_value, state, traceback=None): - self.id = id - self._result = ret_value - self._state = state - self._traceback = traceback - - def _get_task_meta(self): - return {'task_id': self.id, 'result': self._result, 'status': - self._state, 'traceback': self._traceback} - - def __reduce__(self): - return self.__class__, self.__reduce_args__() - - def __reduce_args__(self): - return (self.id, self._result, self._state, self._traceback) - - def __copy__(self): - cls, args = self.__reduce__() - return cls(*args) - - def ready(self): - return True - - def get(self, timeout=None, propagate=True, **kwargs): - if self.successful(): - return self.result - elif self.state in states.PROPAGATE_STATES: - if propagate: - raise self.result - return self.result - wait = get - - def forget(self): - pass - - def revoke(self, *args, **kwargs): - self._state = states.REVOKED - - def __repr__(self): - return ''.format(self) - - @property - def result(self): - """The tasks return value""" - return self._result - - @property - def state(self): - """The tasks state.""" - return self._state - status = state - - @property - def traceback(self): - """The traceback if the task failed.""" - return self._traceback - - @property - def supports_native_join(self): - return False - - -def result_from_tuple(r, app=None): - # earlier backends may just pickle, so check if - # result is already prepared. 
- app = app_or_default(app) - Result = app.AsyncResult - if not isinstance(r, ResultBase): - res, nodes = r - if nodes: - return app.GroupResult( - res, [result_from_tuple(child, app) for child in nodes], - ) - # previously did not include parent - id, parent = res if isinstance(res, (list, tuple)) else (res, None) - if parent: - parent = result_from_tuple(parent, app) - return Result(id, parent=parent) - return r -from_serializable = result_from_tuple # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/schedules.py b/thesisenv/lib/python3.6/site-packages/celery/schedules.py deleted file mode 100644 index 6424dfa..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/schedules.py +++ /dev/null @@ -1,593 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.schedules - ~~~~~~~~~~~~~~~~ - - Schedules define the intervals at which periodic tasks - should run. - -""" -from __future__ import absolute_import - -import numbers -import re - -from collections import namedtuple -from datetime import datetime, timedelta - -from kombu.utils import cached_property - -from . import current_app -from .five import range, string_t -from .utils import is_iterable -from .utils.timeutils import ( - timedelta_seconds, weekday, maybe_timedelta, remaining, - humanize_seconds, timezone, maybe_make_aware, ffwd -) -from .datastructures import AttributeDict - -__all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser', - 'maybe_schedule'] - -schedstate = namedtuple('schedstate', ('is_due', 'next')) - - -CRON_PATTERN_INVALID = """\ -Invalid crontab pattern. Valid range is {min}-{max}. \ -'{value}' was found.\ -""" - -CRON_INVALID_TYPE = """\ -Argument cronspec needs to be of any of the following types: \ -int, str, or an iterable type. {type!r} was given.\ -""" - -CRON_REPR = """\ -\ -""" - - -def cronfield(s): - return '*' if s is None else s - - -class ParseException(Exception): - """Raised by crontab_parser when the input can't be parsed.""" - - -class schedule(object): - """Schedule for periodic task. - - :param run_every: Interval in seconds (or a :class:`~datetime.timedelta`). - :param relative: If set to True the run time will be rounded to the - resolution of the interval. - :param nowfun: Function returning the current date and time - (class:`~datetime.datetime`). - :param app: Celery app instance. - - """ - relative = False - - def __init__(self, run_every=None, relative=False, nowfun=None, app=None): - self.run_every = maybe_timedelta(run_every) - self.relative = relative - self.nowfun = nowfun - self._app = app - - def now(self): - return (self.nowfun or self.app.now)() - - def remaining_estimate(self, last_run_at): - return remaining( - self.maybe_make_aware(last_run_at), self.run_every, - self.maybe_make_aware(self.now()), self.relative, - ) - - def is_due(self, last_run_at): - """Returns tuple of two items `(is_due, next_time_to_check)`, - where next time to check is in seconds. - - e.g. - - * `(True, 20)`, means the task should be run now, and the next - time to check is in 20 seconds. - - * `(False, 12.3)`, means the task is not due, but that the scheduler - should check again in 12.3 seconds. - - The next time to check is used to save energy/cpu cycles, - it does not need to be accurate but will influence the precision - of your schedule. You must also keep in mind - the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, - which decides the maximum number of seconds the scheduler can - sleep between re-checking the periodic task intervals. 
-        have a task that changes schedule at runtime then your next_run_at
-        check will decide how long it will take before a change to the
-        schedule takes effect.  The max loop interval takes precedence
-        over the next-check-at value returned.
-
-        .. admonition:: Scheduler max interval variance
-
-            The default max loop interval may vary for different schedulers.
-            For the default scheduler the value is 5 minutes, but for e.g.
-            the django-celery database scheduler the value is 5 seconds.
-
-        """
-        last_run_at = self.maybe_make_aware(last_run_at)
-        rem_delta = self.remaining_estimate(last_run_at)
-        remaining_s = timedelta_seconds(rem_delta)
-        if remaining_s == 0:
-            return schedstate(is_due=True, next=self.seconds)
-        return schedstate(is_due=False, next=remaining_s)
-
-    def maybe_make_aware(self, dt):
-        if self.utc_enabled:
-            return maybe_make_aware(dt, self.tz)
-        return dt
-
-    def __repr__(self):
-        return '<freq: {0.human_seconds}>'.format(self)
-
-    def __eq__(self, other):
-        if isinstance(other, schedule):
-            return self.run_every == other.run_every
-        return self.run_every == other
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __reduce__(self):
-        return self.__class__, (self.run_every, self.relative, self.nowfun)
-
-    @property
-    def seconds(self):
-        return timedelta_seconds(self.run_every)
-
-    @property
-    def human_seconds(self):
-        return humanize_seconds(self.seconds)
-
-    @property
-    def app(self):
-        return self._app or current_app._get_current_object()
-
-    @app.setter  # noqa
-    def app(self, app):
-        self._app = app
-
-    @cached_property
-    def tz(self):
-        return self.app.timezone
-
-    @cached_property
-    def utc_enabled(self):
-        return self.app.conf.CELERY_ENABLE_UTC
-
-    def to_local(self, dt):
-        if not self.utc_enabled:
-            return timezone.to_local_fallback(dt)
-        return dt
-
-
-class crontab_parser(object):
-    """Parser for crontab expressions.  Any expression of the form 'groups'
-    (see BNF grammar below) is accepted and expanded to a set of numbers.
-    These numbers represent the units of time that the crontab needs to
-    run on::
-
-        digit   :: '0'..'9'
-        dow     :: 'a'..'z'
-        number  :: digit+ | dow+
-        steps   :: number
-        range   :: number ( '-' number ) ?
-        numspec :: '*' | range
-        expr    :: numspec ( '/' steps ) ?
-        groups  :: expr ( ',' expr ) *
-
-    The parser is a general purpose one, useful for parsing hours, minutes
-    and day_of_week expressions.  Example usage::
-
-        >>> sorted(crontab_parser(60).parse('*/15'))
-        [0, 15, 30, 45]
-        >>> sorted(crontab_parser(24).parse('*/4'))
-        [0, 4, 8, 12, 16, 20]
-        >>> sorted(crontab_parser(7).parse('*'))
-        [0, 1, 2, 3, 4, 5, 6]
-
-    It can also parse day_of_month and month_of_year expressions if
-    initialized with a minimum of 1.  Example usage::
-
-        >>> sorted(crontab_parser(31, 1).parse('*/3'))
-        [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
-        >>> sorted(crontab_parser(12, 1).parse('*/2'))
-        [1, 3, 5, 7, 9, 11]
-        >>> sorted(crontab_parser(12, 1).parse('2-12/2'))
-        [2, 4, 6, 8, 10, 12]
-
-    The maximum possible expanded value returned is found by the formula::
-
-        max_ + min_ - 1
-
-    """
-    ParseException = ParseException
-
-    _range = r'(\w+?)-(\w+)'
-    _steps = r'/(\w+)?'
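    # How these patterns combine (an illustrative sketch): '3-5' matches
    # _range, '*/15' matches _star + _steps, '1-30/2' matches
    # _range + _steps, and a bare '*' matches _star; anything else falls
    # through to _expand_range() as a single number or weekday name.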
-    _star = r'\*'
-
-    def __init__(self, max_=60, min_=0):
-        self.max_ = max_
-        self.min_ = min_
-        self.pats = (
-            (re.compile(self._range + self._steps), self._range_steps),
-            (re.compile(self._range), self._expand_range),
-            (re.compile(self._star + self._steps), self._star_steps),
-            (re.compile('^' + self._star + '$'), self._expand_star),
-        )
-
-    def parse(self, spec):
-        acc = set()
-        for part in spec.split(','):
-            if not part:
-                raise self.ParseException('empty part')
-            acc |= set(self._parse_part(part))
-        return acc
-
-    def _parse_part(self, part):
-        for regex, handler in self.pats:
-            m = regex.match(part)
-            if m:
-                return handler(m.groups())
-        return self._expand_range((part, ))
-
-    def _expand_range(self, toks):
-        fr = self._expand_number(toks[0])
-        if len(toks) > 1:
-            to = self._expand_number(toks[1])
-            if to < fr:  # Wrap around max_ if necessary
-                return (list(range(fr, self.min_ + self.max_)) +
-                        list(range(self.min_, to + 1)))
-            return list(range(fr, to + 1))
-        return [fr]
-
-    def _range_steps(self, toks):
-        if len(toks) != 3 or not toks[2]:
-            raise self.ParseException('empty filter')
-        return self._expand_range(toks[:2])[::int(toks[2])]
-
-    def _star_steps(self, toks):
-        if not toks or not toks[0]:
-            raise self.ParseException('empty filter')
-        return self._expand_star()[::int(toks[0])]
-
-    def _expand_star(self, *args):
-        return list(range(self.min_, self.max_ + self.min_))
-
-    def _expand_number(self, s):
-        if isinstance(s, string_t) and s[0] == '-':
-            raise self.ParseException('negative numbers not supported')
-        try:
-            i = int(s)
-        except ValueError:
-            try:
-                i = weekday(s)
-            except KeyError:
-                raise ValueError('Invalid weekday literal {0!r}.'.format(s))
-
-        max_val = self.min_ + self.max_ - 1
-        if i > max_val:
-            raise ValueError(
-                'Invalid end range: {0} > {1}.'.format(i, max_val))
-        if i < self.min_:
-            raise ValueError(
-                'Invalid beginning range: {0} < {1}.'.format(i, self.min_))
-
-        return i
-
-
-class crontab(schedule):
-    """A crontab can be used as the `run_every` value of a
-    :class:`PeriodicTask` to add cron-like scheduling.
-
-    Like a :manpage:`cron` job, you can specify units of time of when
-    you would like the task to execute.  It is a reasonably complete
-    implementation of cron's features, so it should cover a fair
-    degree of scheduling needs.
-
-    You can specify a minute, an hour, a day of the week, a day of the
-    month, and/or a month in the year in any of the following formats:
-
-    .. attribute:: minute
-
-        - A (list of) integers from 0-59 that represent the minutes of
-          an hour of when execution should occur; or
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, like `minute='*/15'` (for every quarter) or
-          `minute='1,13,30-45,50-59/2'`.
-
-    .. attribute:: hour
-
-        - A (list of) integers from 0-23 that represent the hours of
-          a day of when execution should occur; or
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, like `hour='*/3'` (for every three hours) or
-          `hour='0,8-17/2'` (at midnight, and every two hours during
-          office hours).
-
-    .. attribute:: day_of_week
-
-        - A (list of) integers from 0-6, where Sunday = 0 and Saturday =
-          6, that represent the days of a week that execution should
-          occur.
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, like `day_of_week='mon-fri'` (for weekdays only).
-          (Beware that `day_of_week='*/2'` does not literally mean
-          'every two days', but 'every day that is divisible by two'!)
-
-    .. attribute:: day_of_month
-
-        - A (list of) integers from 1-31 that represent the days of the
-          month that execution should occur.
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `day_of_month='2-30/2'` (for every even
-          numbered day) or `day_of_month='1-7,15-21'` (for the first and
-          third weeks of the month).
-
-    .. attribute:: month_of_year
-
-        - A (list of) integers from 1-12 that represent the months of
-          the year during which execution can occur.
-        - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `month_of_year='*/3'` (for the first month
-          of every quarter) or `month_of_year='2-12/2'` (for every even
-          numbered month).
-
-    .. attribute:: nowfun
-
-        Function returning the current date and time
-        (:class:`~datetime.datetime`).
-
-    .. attribute:: app
-
-        The Celery app instance.
-
-    It is important to realize that any day on which execution should
-    occur must be represented by entries in all three of the day and
-    month attributes.  For example, if `day_of_week` is 0 and
-    `day_of_month` is every seventh day, only months that begin on
-    Sunday and are also in the `month_of_year` attribute will have
-    execution events.  Or, `day_of_week` is 1 combined with
-    `day_of_month` of '1-7,15-21' means every first and third Monday of
-    every month present in `month_of_year`.
-
-    """
-
-    def __init__(self, minute='*', hour='*', day_of_week='*',
-                 day_of_month='*', month_of_year='*', nowfun=None, app=None):
-        self._orig_minute = cronfield(minute)
-        self._orig_hour = cronfield(hour)
-        self._orig_day_of_week = cronfield(day_of_week)
-        self._orig_day_of_month = cronfield(day_of_month)
-        self._orig_month_of_year = cronfield(month_of_year)
-        self.hour = self._expand_cronspec(hour, 24)
-        self.minute = self._expand_cronspec(minute, 60)
-        self.day_of_week = self._expand_cronspec(day_of_week, 7)
-        self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
-        self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
-        self.nowfun = nowfun
-        self._app = app
-
-    @staticmethod
-    def _expand_cronspec(cronspec, max_, min_=0):
-        """Takes the given cronspec argument in one of the forms::
-
-            int         (like 7)
-            str         (like '3-5,*/15', '*', or 'monday')
-            set         (like set([0,15,30,45]))
-            list        (like [8-17])
-
-        and converts it to an (expanded) set representing all time unit
-        values on which the crontab triggers.  Only in case of the base
-        type being 'str' does parsing occur.  (It is fast and
-        happens only once for each crontab instance, so there is no
-        significant performance overhead involved.)
-
-        For the other base types, merely Python type conversions happen.
-
-        The argument `max_` is needed to determine the expansion of '*'
-        and ranges.
-        The argument `min_` is needed to determine the expansion of '*'
-        and ranges for 1-based cronspecs, such as day of month or month
-        of year.  The default is sufficient for minute, hour, and day of
-        week.
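        For instance (an illustrative sketch of the expansions)::

            _expand_cronspec('*/15', 60)    # -> set([0, 15, 30, 45])
            _expand_cronspec(7, 24)         # -> set([7])
            _expand_cronspec('mon-fri', 7)  # -> set([1, 2, 3, 4, 5])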
- - """ - if isinstance(cronspec, numbers.Integral): - result = set([cronspec]) - elif isinstance(cronspec, string_t): - result = crontab_parser(max_, min_).parse(cronspec) - elif isinstance(cronspec, set): - result = cronspec - elif is_iterable(cronspec): - result = set(cronspec) - else: - raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) - - # assure the result does not preceed the min or exceed the max - for number in result: - if number >= max_ + min_ or number < min_: - raise ValueError(CRON_PATTERN_INVALID.format( - min=min_, max=max_ - 1 + min_, value=number)) - return result - - def _delta_to_next(self, last_run_at, next_hour, next_minute): - """ - Takes a datetime of last run, next minute and hour, and - returns a relativedelta for the next scheduled day and time. - Only called when day_of_month and/or month_of_year cronspec - is specified to further limit scheduled task execution. - """ - from bisect import bisect, bisect_left - - datedata = AttributeDict(year=last_run_at.year) - days_of_month = sorted(self.day_of_month) - months_of_year = sorted(self.month_of_year) - - def day_out_of_range(year, month, day): - try: - datetime(year=year, month=month, day=day) - except ValueError: - return True - return False - - def roll_over(): - while 1: - flag = (datedata.dom == len(days_of_month) or - day_out_of_range(datedata.year, - months_of_year[datedata.moy], - days_of_month[datedata.dom]) or - (self.maybe_make_aware(datetime(datedata.year, - months_of_year[datedata.moy], - days_of_month[datedata.dom])) < last_run_at)) - - if flag: - datedata.dom = 0 - datedata.moy += 1 - if datedata.moy == len(months_of_year): - datedata.moy = 0 - datedata.year += 1 - else: - break - - if last_run_at.month in self.month_of_year: - datedata.dom = bisect(days_of_month, last_run_at.day) - datedata.moy = bisect_left(months_of_year, last_run_at.month) - else: - datedata.dom = 0 - datedata.moy = bisect(months_of_year, last_run_at.month) - if datedata.moy == len(months_of_year): - datedata.moy = 0 - roll_over() - - while 1: - th = datetime(year=datedata.year, - month=months_of_year[datedata.moy], - day=days_of_month[datedata.dom]) - if th.isoweekday() % 7 in self.day_of_week: - break - datedata.dom += 1 - roll_over() - - return ffwd(year=datedata.year, - month=months_of_year[datedata.moy], - day=days_of_month[datedata.dom], - hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) - - def now(self): - return (self.nowfun or self.app.now)() - - def __repr__(self): - return CRON_REPR.format(self) - - def __reduce__(self): - return (self.__class__, (self._orig_minute, - self._orig_hour, - self._orig_day_of_week, - self._orig_day_of_month, - self._orig_month_of_year), None) - - def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): - tz = tz or self.tz - last_run_at = self.maybe_make_aware(last_run_at) - now = self.maybe_make_aware(self.now()) - dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7 - - execute_this_date = (last_run_at.month in self.month_of_year and - last_run_at.day in self.day_of_month and - dow_num in self.day_of_week) - - execute_this_hour = (execute_this_date and - last_run_at.day == now.day and - last_run_at.month == now.month and - last_run_at.year == now.year and - last_run_at.hour in self.hour and - last_run_at.minute < max(self.minute)) - - if execute_this_hour: - next_minute = min(minute for minute in self.minute - if minute > last_run_at.minute) - delta = ffwd(minute=next_minute, second=0, microsecond=0) - else: - next_minute = 
min(self.minute) - execute_today = (execute_this_date and - last_run_at.hour < max(self.hour)) - - if execute_today: - next_hour = min(hour for hour in self.hour - if hour > last_run_at.hour) - delta = ffwd(hour=next_hour, minute=next_minute, - second=0, microsecond=0) - else: - next_hour = min(self.hour) - all_dom_moy = (self._orig_day_of_month == '*' and - self._orig_month_of_year == '*') - if all_dom_moy: - next_day = min([day for day in self.day_of_week - if day > dow_num] or self.day_of_week) - add_week = next_day == dow_num - - delta = ffwd(weeks=add_week and 1 or 0, - weekday=(next_day - 1) % 7, - hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) - else: - delta = self._delta_to_next(last_run_at, - next_hour, next_minute) - return self.to_local(last_run_at), delta, self.to_local(now) - - def remaining_estimate(self, last_run_at, ffwd=ffwd): - """Returns when the periodic task should run next as a timedelta.""" - return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) - - def is_due(self, last_run_at): - """Returns tuple of two items `(is_due, next_time_to_run)`, - where next time to run is in seconds. - - See :meth:`celery.schedules.schedule.is_due` for more information. - - """ - rem_delta = self.remaining_estimate(last_run_at) - rem = timedelta_seconds(rem_delta) - due = rem == 0 - if due: - rem_delta = self.remaining_estimate(self.now()) - rem = timedelta_seconds(rem_delta) - return schedstate(due, rem) - - def __eq__(self, other): - if isinstance(other, crontab): - return (other.month_of_year == self.month_of_year and - other.day_of_month == self.day_of_month and - other.day_of_week == self.day_of_week and - other.hour == self.hour and - other.minute == self.minute) - return NotImplemented - - def __ne__(self, other): - return not self.__eq__(other) - - -def maybe_schedule(s, relative=False, app=None): - if s is not None: - if isinstance(s, numbers.Integral): - s = timedelta(seconds=s) - if isinstance(s, timedelta): - return schedule(s, relative, app=app) - else: - s.app = app - return s diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py deleted file mode 100644 index 352d400..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security - ~~~~~~~~~~~~~~~ - - Module implementing the signing message serializer. - -""" -from __future__ import absolute_import - -from kombu.serialization import ( - registry, disable_insecure_serializers as _disable_insecure_serializers, -) - -from celery.exceptions import ImproperlyConfigured - -from .serialization import register_auth - -SSL_NOT_INSTALLED = """\ -You need to install the pyOpenSSL library to use the auth serializer. -Please install by: - - $ pip install pyOpenSSL -""" - -SETTING_MISSING = """\ -Sorry, but you have to configure the - * CELERY_SECURITY_KEY - * CELERY_SECURITY_CERTIFICATE, and the - * CELERY_SECURITY_CERT_STORE -configuration settings to use the auth serializer. - -Please see the configuration reference for more information. 
-""" - -__all__ = ['setup_security'] - - -def setup_security(allowed_serializers=None, key=None, cert=None, store=None, - digest='sha1', serializer='json', app=None): - """See :meth:`@Celery.setup_security`.""" - if app is None: - from celery import current_app - app = current_app._get_current_object() - - _disable_insecure_serializers(allowed_serializers) - - conf = app.conf - if conf.CELERY_TASK_SERIALIZER != 'auth': - return - - try: - from OpenSSL import crypto # noqa - except ImportError: - raise ImproperlyConfigured(SSL_NOT_INSTALLED) - - key = key or conf.CELERY_SECURITY_KEY - cert = cert or conf.CELERY_SECURITY_CERTIFICATE - store = store or conf.CELERY_SECURITY_CERT_STORE - - if not (key and cert and store): - raise ImproperlyConfigured(SETTING_MISSING) - - with open(key) as kf: - with open(cert) as cf: - register_auth(kf.read(), cf.read(), store, digest, serializer) - registry._set_default_serializer('auth') - - -def disable_untrusted_serializers(whitelist=None): - _disable_insecure_serializers(allowed=whitelist) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py b/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py deleted file mode 100644 index c1c520c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.certificate - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - X.509 certificates. - -""" -from __future__ import absolute_import - -import glob -import os - -from kombu.utils.encoding import bytes_to_str - -from celery.exceptions import SecurityError -from celery.five import values - -from .utils import crypto, reraise_errors - -__all__ = ['Certificate', 'CertStore', 'FSCertStore'] - - -class Certificate(object): - """X.509 certificate.""" - - def __init__(self, cert): - assert crypto is not None - with reraise_errors('Invalid certificate: {0!r}'): - self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) - - def has_expired(self): - """Check if the certificate has expired.""" - return self._cert.has_expired() - - def get_serial_number(self): - """Return the serial number in the certificate.""" - return bytes_to_str(self._cert.get_serial_number()) - - def get_issuer(self): - """Return issuer (CA) as a string""" - return ' '.join(bytes_to_str(x[1]) for x in - self._cert.get_issuer().get_components()) - - def get_id(self): - """Serial number/issuer pair uniquely identifies a certificate""" - return '{0} {1}'.format(self.get_issuer(), self.get_serial_number()) - - def verify(self, data, signature, digest): - """Verifies the signature for string containing data.""" - with reraise_errors('Bad signature: {0!r}'): - crypto.verify(self._cert, signature, data, digest) - - -class CertStore(object): - """Base class for certificate stores""" - - def __init__(self): - self._certs = {} - - def itercerts(self): - """an iterator over the certificates""" - for c in values(self._certs): - yield c - - def __getitem__(self, id): - """get certificate by id""" - try: - return self._certs[bytes_to_str(id)] - except KeyError: - raise SecurityError('Unknown certificate: {0!r}'.format(id)) - - def add_cert(self, cert): - cert_id = bytes_to_str(cert.get_id()) - if cert_id in self._certs: - raise SecurityError('Duplicate certificate: {0!r}'.format(id)) - self._certs[cert_id] = cert - - -class FSCertStore(CertStore): - """File system certificate store""" - - def __init__(self, path): - CertStore.__init__(self) - if os.path.isdir(path): - path = 
os.path.join(path, '*') - for p in glob.glob(path): - with open(p) as f: - cert = Certificate(f.read()) - if cert.has_expired(): - raise SecurityError( - 'Expired certificate: {0!r}'.format(cert.get_id())) - self.add_cert(cert) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/key.py b/thesisenv/lib/python3.6/site-packages/celery/security/key.py deleted file mode 100644 index a5c2620..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/key.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.key - ~~~~~~~~~~~~~~~~~~~ - - Private key for the security serializer. - -""" -from __future__ import absolute_import - -from kombu.utils.encoding import ensure_bytes - -from .utils import crypto, reraise_errors - -__all__ = ['PrivateKey'] - - -class PrivateKey(object): - - def __init__(self, key): - with reraise_errors('Invalid private key: {0!r}'): - self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) - - def sign(self, data, digest): - """sign string containing data.""" - with reraise_errors('Unable to sign data: {0!r}'): - return crypto.sign(self._key, ensure_bytes(data), digest) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py deleted file mode 100644 index 7548358..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.serialization - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Secure serializer. - -""" -from __future__ import absolute_import - -import base64 - -from kombu.serialization import registry, dumps, loads -from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes - -from .certificate import Certificate, FSCertStore -from .key import PrivateKey -from .utils import reraise_errors - -__all__ = ['SecureSerializer', 'register_auth'] - - -def b64encode(s): - return bytes_to_str(base64.b64encode(str_to_bytes(s))) - - -def b64decode(s): - return base64.b64decode(str_to_bytes(s)) - - -class SecureSerializer(object): - - def __init__(self, key=None, cert=None, cert_store=None, - digest='sha1', serializer='json'): - self._key = key - self._cert = cert - self._cert_store = cert_store - self._digest = digest - self._serializer = serializer - - def serialize(self, data): - """serialize data structure into string""" - assert self._key is not None - assert self._cert is not None - with reraise_errors('Unable to serialize: {0!r}', (Exception, )): - content_type, content_encoding, body = dumps( - bytes_to_str(data), serializer=self._serializer) - # What we sign is the serialized body, not the body itself. - # this way the receiver doesn't have to decode the contents - # to verify the signature (and thus avoiding potential flaws - # in the decoding step). 
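        # Frame produced below via _pack() (an illustrative sketch):
        #
        #   b64encode(signer \x00\x01 signature \x00\x01 content_type
        #             \x00\x01 content_encoding \x00\x01 body)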
- body = ensure_bytes(body) - return self._pack(body, content_type, content_encoding, - signature=self._key.sign(body, self._digest), - signer=self._cert.get_id()) - - def deserialize(self, data): - """deserialize data structure from string""" - assert self._cert_store is not None - with reraise_errors('Unable to deserialize: {0!r}', (Exception, )): - payload = self._unpack(data) - signature, signer, body = (payload['signature'], - payload['signer'], - payload['body']) - self._cert_store[signer].verify(body, signature, self._digest) - return loads(bytes_to_str(body), payload['content_type'], - payload['content_encoding'], force=True) - - def _pack(self, body, content_type, content_encoding, signer, signature, - sep=str_to_bytes('\x00\x01')): - fields = sep.join( - ensure_bytes(s) for s in [signer, signature, content_type, - content_encoding, body] - ) - return b64encode(fields) - - def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): - raw_payload = b64decode(ensure_bytes(payload)) - first_sep = raw_payload.find(sep) - - signer = raw_payload[:first_sep] - signer_cert = self._cert_store[signer] - - sig_len = signer_cert._cert.get_pubkey().bits() >> 3 - signature = raw_payload[ - first_sep + len(sep):first_sep + len(sep) + sig_len - ] - end_of_sig = first_sep + len(sep) + sig_len + len(sep) - - v = raw_payload[end_of_sig:].split(sep) - - return { - 'signer': signer, - 'signature': signature, - 'content_type': bytes_to_str(v[0]), - 'content_encoding': bytes_to_str(v[1]), - 'body': bytes_to_str(v[2]), - } - - -def register_auth(key=None, cert=None, store=None, digest='sha1', - serializer='json'): - """register security serializer""" - s = SecureSerializer(key and PrivateKey(key), - cert and Certificate(cert), - store and FSCertStore(store), - digest=digest, serializer=serializer) - registry.register('auth', s.serialize, s.deserialize, - content_type='application/data', - content_encoding='utf-8') diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/utils.py b/thesisenv/lib/python3.6/site-packages/celery/security/utils.py deleted file mode 100644 index d184d0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/security/utils.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.security.utils - ~~~~~~~~~~~~~~~~~~~~~ - - Utilities used by the message signing serializer. - -""" -from __future__ import absolute_import - -import sys - -from contextlib import contextmanager - -from celery.exceptions import SecurityError -from celery.five import reraise - -try: - from OpenSSL import crypto -except ImportError: # pragma: no cover - crypto = None # noqa - -__all__ = ['reraise_errors'] - - -@contextmanager -def reraise_errors(msg='{0!r}', errors=None): - assert crypto is not None - errors = (crypto.Error, ) if errors is None else errors - try: - yield - except errors as exc: - reraise(SecurityError, - SecurityError(msg.format(exc)), - sys.exc_info()[2]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/signals.py b/thesisenv/lib/python3.6/site-packages/celery/signals.py deleted file mode 100644 index 2091830..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/signals.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.signals - ~~~~~~~~~~~~~~ - - This module defines the signals (Observer pattern) sent by - both workers and clients. - - Functions can be connected to these signals, and connected - functions are called whenever a signal is called. - - See :ref:`signals` for more information. 
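    For example, a handler can be connected like this (a minimal
    sketch; the dispatcher always passes ``sender`` and extra
    keyword arguments)::

        from celery.signals import task_success

        @task_success.connect
        def on_task_success(sender=None, result=None, **kwargs):
            print('task {0} succeeded: {1!r}'.format(sender.name, result))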
- -""" -from __future__ import absolute_import -from .utils.dispatch import Signal - -__all__ = ['before_task_publish', 'after_task_publish', - 'task_prerun', 'task_postrun', 'task_success', - 'task_retry', 'task_failure', 'task_revoked', 'celeryd_init', - 'celeryd_after_setup', 'worker_init', 'worker_process_init', - 'worker_ready', 'worker_shutdown', 'setup_logging', - 'after_setup_logger', 'after_setup_task_logger', - 'beat_init', 'beat_embedded_init', 'eventlet_pool_started', - 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', - 'eventlet_pool_apply'] - -before_task_publish = Signal(providing_args=[ - 'body', 'exchange', 'routing_key', 'headers', 'properties', - 'declare', 'retry_policy', -]) -after_task_publish = Signal(providing_args=[ - 'body', 'exchange', 'routing_key', -]) -#: Deprecated, use after_task_publish instead. -task_sent = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', -]) -task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs']) -task_postrun = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'retval', -]) -task_success = Signal(providing_args=['result']) -task_retry = Signal(providing_args=[ - 'request', 'reason', 'einfo', -]) -task_failure = Signal(providing_args=[ - 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', -]) -task_revoked = Signal(providing_args=[ - 'request', 'terminated', 'signum', 'expired', -]) -celeryd_init = Signal(providing_args=['instance', 'conf', 'options']) -celeryd_after_setup = Signal(providing_args=['instance', 'conf']) -import_modules = Signal(providing_args=[]) -worker_init = Signal(providing_args=[]) -worker_process_init = Signal(providing_args=[]) -worker_process_shutdown = Signal(providing_args=[]) -worker_ready = Signal(providing_args=[]) -worker_shutdown = Signal(providing_args=[]) -setup_logging = Signal(providing_args=[ - 'loglevel', 'logfile', 'format', 'colorize', -]) -after_setup_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize', -]) -after_setup_task_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize', -]) -beat_init = Signal(providing_args=[]) -beat_embedded_init = Signal(providing_args=[]) -eventlet_pool_started = Signal(providing_args=[]) -eventlet_pool_preshutdown = Signal(providing_args=[]) -eventlet_pool_postshutdown = Signal(providing_args=[]) -eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs']) -user_preload_options = Signal(providing_args=['app', 'options']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/states.py b/thesisenv/lib/python3.6/site-packages/celery/states.py deleted file mode 100644 index 665a57b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/states.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.states -============= - -Built-in task states. - -.. _states: - -States ------- - -See :ref:`task-states`. - -.. _statesets: - -Sets ----- - -.. state:: READY_STATES - -READY_STATES -~~~~~~~~~~~~ - -Set of states meaning the task result is ready (has been executed). - -.. state:: UNREADY_STATES - -UNREADY_STATES -~~~~~~~~~~~~~~ - -Set of states meaning the task result is not ready (has not been executed). - -.. state:: EXCEPTION_STATES - -EXCEPTION_STATES -~~~~~~~~~~~~~~~~ - -Set of states meaning the task returned an exception. - -.. state:: PROPAGATE_STATES - -PROPAGATE_STATES -~~~~~~~~~~~~~~~~ - -Set of exception states that should propagate exceptions to the user. - -.. 
state:: ALL_STATES - -ALL_STATES -~~~~~~~~~~ - -Set of all possible states. - - -Misc. ------ - -""" -from __future__ import absolute_import - -__all__ = ['PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', - 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', - 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state'] - -#: State precedence. -#: None represents the precedence of an unknown state. -#: Lower index means higher precedence. -PRECEDENCE = ['SUCCESS', - 'FAILURE', - None, - 'REVOKED', - 'STARTED', - 'RECEIVED', - 'RETRY', - 'PENDING'] - -#: Hash lookup of PRECEDENCE to index -PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) -NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] - - -def precedence(state): - """Get the precedence index for state. - - Lower index means higher precedence. - - """ - try: - return PRECEDENCE_LOOKUP[state] - except KeyError: - return NONE_PRECEDENCE - - -class state(str): - """State is a subclass of :class:`str`, implementing comparison - methods adhering to state precedence rules:: - - >>> from celery.states import state, PENDING, SUCCESS - - >>> state(PENDING) < state(SUCCESS) - True - - Any custom state is considered to be lower than :state:`FAILURE` and - :state:`SUCCESS`, but higher than any of the other built-in states:: - - >>> state('PROGRESS') > state(STARTED) - True - - >>> state('PROGRESS') > state('SUCCESS') - False - - """ - - def compare(self, other, fun): - return fun(precedence(self), precedence(other)) - - def __gt__(self, other): - return precedence(self) < precedence(other) - - def __ge__(self, other): - return precedence(self) <= precedence(other) - - def __lt__(self, other): - return precedence(self) > precedence(other) - - def __le__(self, other): - return precedence(self) >= precedence(other) - -#: Task state is unknown (assumed pending since you know the id). -PENDING = 'PENDING' -#: Task was received by a worker. -RECEIVED = 'RECEIVED' -#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`). -STARTED = 'STARTED' -#: Task succeeded -SUCCESS = 'SUCCESS' -#: Task failed -FAILURE = 'FAILURE' -#: Task was revoked. -REVOKED = 'REVOKED' -#: Task is waiting for retry. -RETRY = 'RETRY' -IGNORED = 'IGNORED' -REJECTED = 'REJECTED' - -READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED]) -UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY]) -EXCEPTION_STATES = frozenset([RETRY, FAILURE, REVOKED]) -PROPAGATE_STATES = frozenset([FAILURE, REVOKED]) - -ALL_STATES = frozenset([PENDING, RECEIVED, STARTED, - SUCCESS, FAILURE, RETRY, REVOKED]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py deleted file mode 100644 index 4ab1a2f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task - ~~~~~~~~~~~ - - This is the old task module, it should not be used anymore, - import from the main 'celery' module instead. - If you're looking for the decorator implementation then that's in - ``celery.app.base.Celery.task``. 
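    Modern code should import from the main module instead, e.g.
    (a minimal sketch)::

        from celery import shared_task

        @shared_task
        def add(x, y):
            return x + y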
- -""" -from __future__ import absolute_import - -from celery._state import current_app, current_task as current -from celery.five import LazyModule, recreate_module -from celery.local import Proxy - -__all__ = [ - 'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task', - 'group', 'chord', 'subtask', 'TaskSet', -] - - -STATICA_HACK = True -globals()['kcah_acitats'[::-1].upper()] = False -if STATICA_HACK: # pragma: no cover - # This is never executed, but tricks static analyzers (PyDev, PyCharm, - # pylint, etc.) into knowing the types of these symbols, and what - # they contain. - from celery.canvas import group, chord, subtask - from .base import BaseTask, Task, PeriodicTask, task, periodic_task - from .sets import TaskSet - - -class module(LazyModule): - - def __call__(self, *args, **kwargs): - return self.task(*args, **kwargs) - - -old_module, new_module = recreate_module( # pragma: no cover - __name__, - by_module={ - 'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask', - 'task', 'periodic_task'], - 'celery.canvas': ['group', 'chord', 'subtask'], - 'celery.task.sets': ['TaskSet'], - }, - base=module, - __package__='celery.task', - __file__=__file__, - __path__=__path__, - __doc__=__doc__, - current=current, - discard_all=Proxy(lambda: current_app.control.purge), - backend_cleanup=Proxy( - lambda: current_app.tasks['celery.backend_cleanup'] - ), -) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/base.py b/thesisenv/lib/python3.6/site-packages/celery/task/base.py deleted file mode 100644 index aeb9f82..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/base.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.base - ~~~~~~~~~~~~~~~~ - - The task implementation has been moved to :mod:`celery.app.task`. - - This contains the backward compatible Task class used in the old API, - and shouldn't be used in new applications. - -""" -from __future__ import absolute_import - -from kombu import Exchange - -from celery import current_app -from celery.app.task import Context, TaskType, Task as BaseTask # noqa -from celery.five import class_property, reclassmethod -from celery.schedules import maybe_schedule -from celery.utils.log import get_task_logger - -__all__ = ['Task', 'PeriodicTask', 'task'] - -#: list of methods that must be classmethods in the old API. -_COMPAT_CLASSMETHODS = ( - 'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request', - 'AsyncResult', 'subtask', '_get_request', '_get_exec_options', -) - - -class Task(BaseTask): - """Deprecated Task base class. - - Modern applications should use :class:`celery.Task` instead. - - """ - abstract = True - __bound__ = False - __v2_compat__ = True - - # - Deprecated compat. attributes -: - - queue = None - routing_key = None - exchange = None - exchange_type = None - delivery_mode = None - mandatory = False # XXX deprecated - immediate = False # XXX deprecated - priority = None - type = 'regular' - disable_error_emails = False - accept_magic_kwargs = False - - from_config = BaseTask.from_config + ( - ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'), - ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'), - ) - - # In old Celery the @task decorator didn't exist, so one would create - # classes instead and use them directly (e.g. MyTask.apply_async()). - # the use of classmethods was a hack so that it was not necessary - # to instantiate the class before using it, but it has only - # given us pain (like all magic). 
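    # The loop below re-binds each listed BaseTask method as a
    # classmethod on this compat class, so legacy code that never
    # instantiates the task class keeps working, e.g. (an illustrative
    # sketch): MyTask.delay(2, 2) or MyTask.apply_async((2, 2)).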
- for name in _COMPAT_CLASSMETHODS: - locals()[name] = reclassmethod(getattr(BaseTask, name)) - - @class_property - def request(cls): - return cls._get_request() - - @class_property - def backend(cls): - if cls._backend is None: - return cls.app.backend - return cls._backend - - @backend.setter - def backend(cls, value): # noqa - cls._backend = value - - @classmethod - def get_logger(self, **kwargs): - return get_task_logger(self.name) - - @classmethod - def establish_connection(self): - """Deprecated method used to get a broker connection. - - Should be replaced with :meth:`@Celery.connection` - instead, or by acquiring connections from the connection pool: - - .. code-block:: python - - # using the connection pool - with celery.pool.acquire(block=True) as conn: - ... - - # establish fresh connection - with celery.connection() as conn: - ... - """ - return self._get_app().connection() - - def get_publisher(self, connection=None, exchange=None, - exchange_type=None, **options): - """Deprecated method to get the task publisher (now called producer). - - Should be replaced with :class:`@amqp.TaskProducer`: - - .. code-block:: python - - with celery.connection() as conn: - with celery.amqp.TaskProducer(conn) as prod: - my_task.apply_async(producer=prod) - - """ - exchange = self.exchange if exchange is None else exchange - if exchange_type is None: - exchange_type = self.exchange_type - connection = connection or self.establish_connection() - return self._get_app().amqp.TaskProducer( - connection, - exchange=exchange and Exchange(exchange, exchange_type), - routing_key=self.routing_key, **options - ) - - @classmethod - def get_consumer(self, connection=None, queues=None, **kwargs): - """Deprecated method used to get consumer for the queue - this task is sent to. - - Should be replaced with :class:`@amqp.TaskConsumer` instead: - - """ - Q = self._get_app().amqp - connection = connection or self.establish_connection() - if queues is None: - queues = Q.queues[self.queue] if self.queue else Q.default_queue - return Q.TaskConsumer(connection, queues, **kwargs) - - -class PeriodicTask(Task): - """A periodic task is a task that adds itself to the - :setting:`CELERYBEAT_SCHEDULE` setting.""" - abstract = True - ignore_result = True - relative = False - options = None - compat = True - - def __init__(self): - if not hasattr(self, 'run_every'): - raise NotImplementedError( - 'Periodic tasks must have a run_every attribute') - self.run_every = maybe_schedule(self.run_every, self.relative) - super(PeriodicTask, self).__init__() - - @classmethod - def on_bound(cls, app): - app.conf.CELERYBEAT_SCHEDULE[cls.name] = { - 'task': cls.name, - 'schedule': cls.run_every, - 'args': (), - 'kwargs': {}, - 'options': cls.options or {}, - 'relative': cls.relative, - } - - -def task(*args, **kwargs): - """Deprecated decorator, please use :func:`celery.task`.""" - return current_app.task(*args, **dict({'accept_magic_kwargs': False, - 'base': Task}, **kwargs)) - - -def periodic_task(*args, **options): - """Deprecated decorator, please use :setting:`CELERYBEAT_SCHEDULE`.""" - return task(**dict({'base': PeriodicTask}, **options)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/http.py b/thesisenv/lib/python3.6/site-packages/celery/task/http.py deleted file mode 100644 index e170ec3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/http.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.http - ~~~~~~~~~~~~~~~~ - - Webhook task implementation. 
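    The remote side is expected to answer with a JSON document of the
    form handled by ``extract_response`` below (a sketch)::

        {"status": "success", "retval": 400}
        {"status": "failure", "reason": "Invalid moon alignment."}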
- -""" -from __future__ import absolute_import - -import anyjson -import sys - -try: - from urllib.parse import parse_qsl, urlencode, urlparse # Py3 -except ImportError: # pragma: no cover - from urllib import urlencode # noqa - from urlparse import urlparse, parse_qsl # noqa - -from celery import shared_task, __version__ as celery_version -from celery.five import items, reraise -from celery.utils.log import get_task_logger - -__all__ = ['InvalidResponseError', 'RemoteExecuteError', 'UnknownStatusError', - 'HttpDispatch', 'dispatch', 'URL'] - -GET_METHODS = frozenset(['GET', 'HEAD']) -logger = get_task_logger(__name__) - - -if sys.version_info[0] == 3: # pragma: no cover - - from urllib.request import Request, urlopen - - def utf8dict(tup): - if not isinstance(tup, dict): - return dict(tup) - return tup - -else: - - from urllib2 import Request, urlopen # noqa - - def utf8dict(tup): # noqa - """With a dict's items() tuple return a new dict with any utf-8 - keys/values encoded.""" - return dict( - (k.encode('utf-8'), - v.encode('utf-8') if isinstance(v, unicode) else v) # noqa - for k, v in tup) - - -class InvalidResponseError(Exception): - """The remote server gave an invalid response.""" - - -class RemoteExecuteError(Exception): - """The remote task gave a custom error.""" - - -class UnknownStatusError(InvalidResponseError): - """The remote server gave an unknown status.""" - - -def extract_response(raw_response, loads=anyjson.loads): - """Extract the response text from a raw JSON response.""" - if not raw_response: - raise InvalidResponseError('Empty response') - try: - payload = loads(raw_response) - except ValueError as exc: - reraise(InvalidResponseError, InvalidResponseError( - str(exc)), sys.exc_info()[2]) - - status = payload['status'] - if status == 'success': - return payload['retval'] - elif status == 'failure': - raise RemoteExecuteError(payload.get('reason')) - else: - raise UnknownStatusError(str(status)) - - -class MutableURL(object): - """Object wrapping a Uniform Resource Locator. - - Supports editing the query parameter list. - You can convert the object back to a string, the query will be - properly urlencoded. - - Examples - - >>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo') - >>> url.query - {'x': '3', 'y': '4'} - >>> str(url) - 'http://www.google.com:6580/foo/bar?y=4&x=3#foo' - >>> url.query['x'] = 10 - >>> url.query.update({'George': 'Costanza'}) - >>> str(url) - 'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo' - - """ - def __init__(self, url): - self.parts = urlparse(url) - self.query = dict(parse_qsl(self.parts[4])) - - def __str__(self): - scheme, netloc, path, params, query, fragment = self.parts - query = urlencode(utf8dict(items(self.query))) - components = [scheme + '://', netloc, path or '/', - ';{0}'.format(params) if params else '', - '?{0}'.format(query) if query else '', - '#{0}'.format(fragment) if fragment else ''] - return ''.join(c for c in components if c) - - def __repr__(self): - return '<{0}: {1}>'.format(type(self).__name__, self) - - -class HttpDispatch(object): - """Make task HTTP request and collect the task result. - - :param url: The URL to request. - :param method: HTTP method used. Currently supported methods are `GET` - and `POST`. - :param task_kwargs: Task keyword arguments. - :param logger: Logger used for user/system feedback. 
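    Example (a sketch; the URL and parameters are hypothetical)::

        res = HttpDispatch(
            url='http://example.com/mul', method='GET',
            task_kwargs={'x': 10, 'y': 10},
        ).dispatch()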
- - """ - user_agent = 'celery/{version}'.format(version=celery_version) - timeout = 5 - - def __init__(self, url, method, task_kwargs, **kwargs): - self.url = url - self.method = method - self.task_kwargs = task_kwargs - self.logger = kwargs.get('logger') or logger - - def make_request(self, url, method, params): - """Perform HTTP request and return the response.""" - request = Request(url, params) - for key, val in items(self.http_headers): - request.add_header(key, val) - response = urlopen(request) # user catches errors. - return response.read() - - def dispatch(self): - """Dispatch callback and return result.""" - url = MutableURL(self.url) - params = None - if self.method in GET_METHODS: - url.query.update(self.task_kwargs) - else: - params = urlencode(utf8dict(items(self.task_kwargs))) - raw_response = self.make_request(str(url), self.method, params) - return extract_response(raw_response) - - @property - def http_headers(self): - headers = {'User-Agent': self.user_agent} - return headers - - -@shared_task(name='celery.http_dispatch', bind=True, - url=None, method=None, accept_magic_kwargs=False) -def dispatch(self, url=None, method='GET', **kwargs): - """Task dispatching to an URL. - - :keyword url: The URL location of the HTTP callback task. - :keyword method: Method to use when dispatching the callback. Usually - `GET` or `POST`. - :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback. - - .. attribute:: url - - If this is set, this is used as the default URL for requests. - Default is to require the user of the task to supply the url as an - argument, as this attribute is intended for subclasses. - - .. attribute:: method - - If this is set, this is the default method used for requests. - Default is to require the user of the task to supply the method as an - argument, as this attribute is intended for subclasses. - - """ - return HttpDispatch( - url or self.url, method or self.method, kwargs, - ).dispatch() - - -class URL(MutableURL): - """HTTP Callback URL - - Supports requesting an URL asynchronously. - - :param url: URL to request. - :keyword dispatcher: Class used to dispatch the request. - By default this is :func:`dispatch`. - - """ - dispatcher = None - - def __init__(self, url, dispatcher=None, app=None): - super(URL, self).__init__(url) - self.app = app - self.dispatcher = dispatcher or self.dispatcher - if self.dispatcher is None: - # Get default dispatcher - self.dispatcher = ( - self.app.tasks['celery.http_dispatch'] if self.app - else dispatch - ) - - def get_async(self, **kwargs): - return self.dispatcher.delay(str(self), 'GET', **kwargs) - - def post_async(self, **kwargs): - return self.dispatcher.delay(str(self), 'POST', **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/sets.py b/thesisenv/lib/python3.6/site-packages/celery/task/sets.py deleted file mode 100644 index e277b79..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/sets.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.task.sets - ~~~~~~~~~~~~~~~~ - - Old ``group`` implementation, this module should - not be used anymore use :func:`celery.group` instead. 
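    That is, old code such as (an illustrative sketch, assuming an
    ``add`` task)::

        TaskSet(add.s(i, i) for i in range(10)).apply_async()

    is spelled with the canvas primitives as::

        group(add.s(i, i) for i in range(10)).apply_async()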
- -""" -from __future__ import absolute_import - -from celery._state import get_current_worker_task -from celery.app import app_or_default -from celery.canvas import maybe_signature # noqa -from celery.utils import uuid, warn_deprecated - -from celery.canvas import subtask # noqa - -warn_deprecated( - 'celery.task.sets and TaskSet', removal='4.0', - alternative="""\ -Please use "group" instead (see the Canvas section in the userguide)\ -""") - - -class TaskSet(list): - """A task containing several subtasks, making it possible - to track how many, or when all of the tasks have been completed. - - :param tasks: A list of :class:`subtask` instances. - - Example:: - - >>> from myproj.tasks import refresh_feed - - >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss') - >>> s = TaskSet(refresh_feed.s(url) for url in urls) - >>> taskset_result = s.apply_async() - >>> list_of_return_values = taskset_result.join() # *expensive* - - """ - app = None - - def __init__(self, tasks=None, app=None, Publisher=None): - self.app = app_or_default(app or self.app) - super(TaskSet, self).__init__( - maybe_signature(t, app=self.app) for t in tasks or [] - ) - self.Publisher = Publisher or self.app.amqp.TaskProducer - self.total = len(self) # XXX compat - - def apply_async(self, connection=None, publisher=None, taskset_id=None): - """Apply TaskSet.""" - app = self.app - - if app.conf.CELERY_ALWAYS_EAGER: - return self.apply(taskset_id=taskset_id) - - with app.connection_or_acquire(connection) as conn: - setid = taskset_id or uuid() - pub = publisher or self.Publisher(conn) - results = self._async_results(setid, pub) - - result = app.TaskSetResult(setid, results) - parent = get_current_worker_task() - if parent: - parent.add_trail(result) - return result - - def _async_results(self, taskset_id, publisher): - return [task.apply_async(taskset_id=taskset_id, publisher=publisher) - for task in self] - - def apply(self, taskset_id=None): - """Applies the TaskSet locally by blocking until all tasks return.""" - setid = taskset_id or uuid() - return self.app.TaskSetResult(setid, self._sync_results(setid)) - - def _sync_results(self, taskset_id): - return [task.apply(taskset_id=taskset_id) for task in self] - - @property - def tasks(self): - return self - - @tasks.setter # noqa - def tasks(self, tasks): - self[:] = tasks diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/trace.py b/thesisenv/lib/python3.6/site-packages/celery/task/trace.py deleted file mode 100644 index 43f19cb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/task/trace.py +++ /dev/null @@ -1,12 +0,0 @@ -"""This module has moved to celery.app.trace.""" -from __future__ import absolute_import - -import sys - -from celery.app import trace -from celery.utils import warn_deprecated - -warn_deprecated('celery.task.trace', removal='3.2', - alternative='Please use celery.app.trace instead.') - -sys.modules[__name__] = trace diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py deleted file mode 100644 index 9667872..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import sys -import warnings - -from importlib import import_module - -try: - WindowsError = WindowsError # noqa -except NameError: - - class WindowsError(Exception): - pass - - -def setup(): - os.environ.update( - # warn if config module not found - 
C_WNOCONF='yes', - KOMBU_DISABLE_LIMIT_PROTECTION='yes', - ) - - if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv: - from warnings import catch_warnings - with catch_warnings(record=True): - import_all_modules() - warnings.resetwarnings() - from celery.tests.case import Trap - from celery._state import set_default_app - set_default_app(Trap()) - - -def teardown(): - # Don't want SUBDEBUG log messages at finalization. - try: - from multiprocessing.util import get_logger - except ImportError: - pass - else: - get_logger().setLevel(logging.WARNING) - - # Make sure test database is removed. - import os - if os.path.exists('test.db'): - try: - os.remove('test.db') - except WindowsError: - pass - - # Make sure there are no remaining threads at shutdown. - import threading - remaining_threads = [thread for thread in threading.enumerate() - if thread.getName() != 'MainThread'] - if remaining_threads: - sys.stderr.write( - '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % ( - remaining_threads)) - - -def find_distribution_modules(name=__name__, file=__file__): - current_dist_depth = len(name.split('.')) - 1 - current_dist = os.path.join(os.path.dirname(file), - *([os.pardir] * current_dist_depth)) - abs = os.path.abspath(current_dist) - dist_name = os.path.basename(abs) - - for dirpath, dirnames, filenames in os.walk(abs): - package = (dist_name + dirpath[len(abs):]).replace('/', '.') - if '__init__.py' in filenames: - yield package - for filename in filenames: - if filename.endswith('.py') and filename != '__init__.py': - yield '.'.join([package, filename])[:-3] - - -def import_all_modules(name=__name__, file=__file__, - skip=('celery.decorators', - 'celery.contrib.batches', - 'celery.task')): - for module in find_distribution_modules(name, file): - if not module.startswith(skip): - try: - import_module(module) - except ImportError: - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py deleted file mode 100644 index efb398a..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py +++ /dev/null @@ -1,228 +0,0 @@ -from __future__ import absolute_import - -import datetime - -import pytz - -from kombu import Exchange, Queue - -from celery.app.amqp import Queues, TaskPublisher -from celery.five import keys -from celery.tests.case import AppCase, Mock - - -class test_TaskProducer(AppCase): - - def test__exit__(self): - publisher = self.app.amqp.TaskProducer(self.app.connection()) - publisher.release = Mock() - with publisher: - pass - publisher.release.assert_called_with() - - def test_declare(self): - publisher = self.app.amqp.TaskProducer(self.app.connection()) - publisher.exchange.name = 'foo' - publisher.declare() - publisher.exchange.name = None - publisher.declare() - - def test_retry_policy(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish_task('tasks.add', (2, 2), {}, - retry_policy={'frobulate': 32.4}) - - def test_publish_no_retry(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123) - self.assertFalse(prod.connection.ensure.call_count) - - def test_publish_custom_queue(self): - prod = self.app.amqp.TaskProducer(Mock()) - self.app.amqp.queues['some_queue'] = Queue( - 'xxx', Exchange('yyy'), 'zzz', - ) - 
prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - prod.publish_task('tasks.add', (8, 8), {}, retry=False, - queue='some_queue') - self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy') - self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz') - - def test_publish_with_countdown(self): - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - now = datetime.datetime(2013, 11, 26, 16, 48, 46) - prod.publish_task('tasks.add', (1, 1), {}, retry=False, - countdown=10, now=now) - self.assertEqual( - prod.publish.call_args[0][0]['eta'], - '2013-11-26T16:48:56+00:00', - ) - - def test_publish_with_countdown_and_timezone(self): - # use timezone with fixed offset to be sure it won't be changed - self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120) - prod = self.app.amqp.TaskProducer(Mock()) - prod.channel.connection.client.declared_entities = set() - prod.publish = Mock() - now = datetime.datetime(2013, 11, 26, 16, 48, 46) - prod.publish_task('tasks.add', (2, 2), {}, retry=False, - countdown=20, now=now) - self.assertEqual( - prod.publish.call_args[0][0]['eta'], - '2013-11-26T18:49:06+02:00', - ) - - def test_event_dispatcher(self): - prod = self.app.amqp.TaskProducer(Mock()) - self.assertTrue(prod.event_dispatcher) - self.assertFalse(prod.event_dispatcher.enabled) - - -class test_TaskConsumer(AppCase): - - def test_accept_content(self): - with self.app.pool.acquire(block=True) as conn: - self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json'] - self.assertEqual( - self.app.amqp.TaskConsumer(conn).accept, - set(['application/json']) - ) - self.assertEqual( - self.app.amqp.TaskConsumer(conn, accept=['json']).accept, - set(['application/json']), - ) - - -class test_compat_TaskPublisher(AppCase): - - def test_compat_exchange_is_string(self): - producer = TaskPublisher(exchange='foo', app=self.app) - self.assertIsInstance(producer.exchange, Exchange) - self.assertEqual(producer.exchange.name, 'foo') - self.assertEqual(producer.exchange.type, 'direct') - producer = TaskPublisher(exchange='foo', exchange_type='topic', - app=self.app) - self.assertEqual(producer.exchange.type, 'topic') - - def test_compat_exchange_is_Exchange(self): - producer = TaskPublisher(exchange=Exchange('foo'), app=self.app) - self.assertEqual(producer.exchange.name, 'foo') - - -class test_PublisherPool(AppCase): - - def test_setup_nolimit(self): - self.app.conf.BROKER_POOL_LIMIT = None - try: - delattr(self.app, '_pool') - except AttributeError: - pass - self.app.amqp._producer_pool = None - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertFalse(pool._resource.queue) - - r1 = pool.acquire() - r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() - - def test_setup(self): - self.app.conf.BROKER_POOL_LIMIT = 2 - try: - delattr(self.app, '_pool') - except AttributeError: - pass - self.app.amqp._producer_pool = None - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertTrue(pool._resource.queue) - - p1 = r1 = pool.acquire() - p2 = r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() - self.assertIs(p2, r1) - self.assertIs(p1, r2) - r1.release() - r2.release() - - -class test_Queues(AppCase): - - def test_queues_format(self): - self.app.amqp.queues._consume_from = {} - self.assertEqual(self.app.amqp.queues.format(), '') - - def 
test_with_defaults(self): - self.assertEqual(Queues(None), {}) - - def test_add(self): - q = Queues() - q.add('foo', exchange='ex', routing_key='rk') - self.assertIn('foo', q) - self.assertIsInstance(q['foo'], Queue) - self.assertEqual(q['foo'].routing_key, 'rk') - - def test_with_ha_policy(self): - qn = Queues(ha_policy=None, create_missing=False) - qn.add('xyz') - self.assertIsNone(qn['xyz'].queue_arguments) - - qn.add('xyx', queue_arguments={'x-foo': 'bar'}) - self.assertEqual(qn['xyx'].queue_arguments, {'x-foo': 'bar'}) - - q = Queues(ha_policy='all', create_missing=False) - q.add(Queue('foo')) - self.assertEqual(q['foo'].queue_arguments, {'x-ha-policy': 'all'}) - - qq = Queue('xyx2', queue_arguments={'x-foo': 'bari'}) - q.add(qq) - self.assertEqual(q['xyx2'].queue_arguments, { - 'x-ha-policy': 'all', - 'x-foo': 'bari', - }) - - q2 = Queues(ha_policy=['A', 'B', 'C'], create_missing=False) - q2.add(Queue('foo')) - self.assertEqual(q2['foo'].queue_arguments, { - 'x-ha-policy': 'nodes', - 'x-ha-policy-params': ['A', 'B', 'C'], - }) - - def test_select_add(self): - q = Queues() - q.select(['foo', 'bar']) - q.select_add('baz') - self.assertItemsEqual(keys(q._consume_from), ['foo', 'bar', 'baz']) - - def test_deselect(self): - q = Queues() - q.select(['foo', 'bar']) - q.deselect('bar') - self.assertItemsEqual(keys(q._consume_from), ['foo']) - - def test_with_ha_policy_compat(self): - q = Queues(ha_policy='all') - q.add('bar') - self.assertEqual(q['bar'].queue_arguments, {'x-ha-policy': 'all'}) - - def test_add_default_exchange(self): - ex = Exchange('fff', 'fanout') - q = Queues(default_exchange=ex) - q.add(Queue('foo')) - self.assertEqual(q['foo'].exchange, ex) - - def test_alias(self): - q = Queues() - q.add(Queue('foo', alias='barfoo')) - self.assertIs(q['barfoo'], q['foo']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py deleted file mode 100644 index 559f5cb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import absolute_import - -from celery.app.annotations import MapAnnotation, prepare -from celery.utils.imports import qualname - -from celery.tests.case import AppCase - - -class MyAnnotation(object): - foo = 65 - - -class AnnotationCase(AppCase): - - def setup(self): - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @self.app.task(shared=False) - def mul(x, y): - return x * y - self.mul = mul - - -class test_MapAnnotation(AnnotationCase): - - def test_annotate(self): - x = MapAnnotation({self.add.name: {'foo': 1}}) - self.assertDictEqual(x.annotate(self.add), {'foo': 1}) - self.assertIsNone(x.annotate(self.mul)) - - def test_annotate_any(self): - x = MapAnnotation({'*': {'foo': 2}}) - self.assertDictEqual(x.annotate_any(), {'foo': 2}) - - x = MapAnnotation() - self.assertIsNone(x.annotate_any()) - - -class test_prepare(AnnotationCase): - - def test_dict_to_MapAnnotation(self): - x = prepare({self.add.name: {'foo': 3}}) - self.assertIsInstance(x[0], MapAnnotation) - - def test_returns_list(self): - self.assertListEqual(prepare(1), [1]) - self.assertListEqual(prepare([1]), [1]) - self.assertListEqual(prepare((1, )), [1]) - self.assertEqual(prepare(None), ()) - - def test_evalutes_qualnames(self): - self.assertEqual(prepare(qualname(MyAnnotation))[0]().foo, 65) - self.assertEqual(prepare([qualname(MyAnnotation)])[0]().foo, 65) diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py deleted file mode 100644 index 9d260c6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py +++ /dev/null @@ -1,726 +0,0 @@ -from __future__ import absolute_import - -import gc -import os -import itertools - -from copy import deepcopy -from pickle import loads, dumps - -from amqp import promise -from kombu import Exchange - -from celery import shared_task, current_app -from celery import app as _app -from celery import _state -from celery.app import base as _appbase -from celery.app import defaults -from celery.exceptions import ImproperlyConfigured -from celery.five import items -from celery.loaders.base import BaseLoader -from celery.platforms import pyimplementation -from celery.utils.serialization import pickle - -from celery.tests.case import ( - CELERY_TEST_CONFIG, - AppCase, - Mock, - depends_on_current_app, - mask_modules, - patch, - platform_pyimp, - sys_platform, - pypy_version, - with_environ, -) -from celery.utils import uuid -from celery.utils.mail import ErrorMail - -THIS_IS_A_KEY = 'this is a value' - - -class ObjectConfig(object): - FOO = 1 - BAR = 2 - -object_config = ObjectConfig() -dict_config = dict(FOO=10, BAR=20) - - -class ObjectConfig2(object): - LEAVE_FOR_WORK = True - MOMENT_TO_STOP = True - CALL_ME_BACK = 123456789 - WANT_ME_TO = False - UNDERSTAND_ME = True - - -class Object(object): - - def __init__(self, **kwargs): - for key, value in items(kwargs): - setattr(self, key, value) - - -def _get_test_config(): - return deepcopy(CELERY_TEST_CONFIG) -test_config = _get_test_config() - - -class test_module(AppCase): - - def test_default_app(self): - self.assertEqual(_app.default_app, _state.default_app) - - def test_bugreport(self): - self.assertTrue(_app.bugreport(app=self.app)) - - -class test_App(AppCase): - - def setup(self): - self.app.add_defaults(test_config) - - def test_task_autofinalize_disabled(self): - with self.Celery('xyzibari', autofinalize=False) as app: - @app.task - def ttafd(): - return 42 - - with self.assertRaises(RuntimeError): - ttafd() - - with self.Celery('xyzibari', autofinalize=False) as app: - @app.task - def ttafd2(): - return 42 - - app.finalize() - self.assertEqual(ttafd2(), 42) - - def test_registry_autofinalize_disabled(self): - with self.Celery('xyzibari', autofinalize=False) as app: - with self.assertRaises(RuntimeError): - app.tasks['celery.chain'] - app.finalize() - self.assertTrue(app.tasks['celery.chain']) - - def test_task(self): - with self.Celery('foozibari') as app: - - def fun(): - pass - - fun.__module__ = '__main__' - task = app.task(fun) - self.assertEqual(task.name, app.main + '.fun') - - def test_with_config_source(self): - with self.Celery(config_source=ObjectConfig) as app: - self.assertEqual(app.conf.FOO, 1) - self.assertEqual(app.conf.BAR, 2) - - @depends_on_current_app - def test_task_windows_execv(self): - prev, _appbase._EXECV = _appbase._EXECV, True - try: - - @self.app.task(shared=False) - def foo(): - pass - - self.assertTrue(foo._get_current_object()) # is proxy - - finally: - _appbase._EXECV = prev - assert not _appbase._EXECV - - def test_task_takes_no_args(self): - with self.assertRaises(TypeError): - @self.app.task(1) - def foo(): - pass - - def test_add_defaults(self): - self.assertFalse(self.app.configured) - _conf = {'FOO': 300} - - def conf(): - return _conf - - self.app.add_defaults(conf) - self.assertIn(conf, 
self.app._pending_defaults) - self.assertFalse(self.app.configured) - self.assertEqual(self.app.conf.FOO, 300) - self.assertTrue(self.app.configured) - self.assertFalse(self.app._pending_defaults) - - # defaults not pickled - appr = loads(dumps(self.app)) - with self.assertRaises(AttributeError): - appr.conf.FOO - - # add more defaults after configured - conf2 = {'FOO': 'BAR'} - self.app.add_defaults(conf2) - self.assertEqual(self.app.conf.FOO, 'BAR') - - self.assertIn(_conf, self.app.conf.defaults) - self.assertIn(conf2, self.app.conf.defaults) - - def test_connection_or_acquire(self): - with self.app.connection_or_acquire(block=True): - self.assertTrue(self.app.pool._dirty) - - with self.app.connection_or_acquire(pool=False): - self.assertFalse(self.app.pool._dirty) - - def test_maybe_close_pool(self): - cpool = self.app._pool = Mock() - amqp = self.app.__dict__['amqp'] = Mock() - ppool = amqp._producer_pool - self.app._maybe_close_pool() - cpool.force_close_all.assert_called_with() - ppool.force_close_all.assert_called_with() - self.assertIsNone(self.app._pool) - self.assertIsNone(self.app.__dict__['amqp']._producer_pool) - - self.app._pool = Mock() - self.app._maybe_close_pool() - self.app._maybe_close_pool() - - def test_using_v1_reduce(self): - self.app._using_v1_reduce = True - self.assertTrue(loads(dumps(self.app))) - - def test_autodiscover_tasks_force(self): - self.app.loader.autodiscover_tasks = Mock() - self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True) - self.app.loader.autodiscover_tasks.assert_called_with( - ['proj.A', 'proj.B'], 'tasks', - ) - self.app.loader.autodiscover_tasks = Mock() - self.app.autodiscover_tasks( - lambda: ['proj.A', 'proj.B'], - related_name='george', - force=True, - ) - self.app.loader.autodiscover_tasks.assert_called_with( - ['proj.A', 'proj.B'], 'george', - ) - - def test_autodiscover_tasks_lazy(self): - with patch('celery.signals.import_modules') as import_modules: - - def packages(): - return [1, 2, 3] - - self.app.autodiscover_tasks(packages) - self.assertTrue(import_modules.connect.called) - prom = import_modules.connect.call_args[0][0] - self.assertIsInstance(prom, promise) - self.assertEqual(prom.fun, self.app._autodiscover_tasks) - self.assertEqual(prom.args[0](), [1, 2, 3]) - - @with_environ('CELERY_BROKER_URL', '') - def test_with_broker(self): - with self.Celery(broker='foo://baribaz') as app: - self.assertEqual(app.conf.BROKER_URL, 'foo://baribaz') - - def test_repr(self): - self.assertTrue(repr(self.app)) - - def test_custom_task_registry(self): - with self.Celery(tasks=self.app.tasks) as app2: - self.assertIs(app2.tasks, self.app.tasks) - - def test_include_argument(self): - with self.Celery(include=('foo', 'bar.foo')) as app: - self.assertEqual(app.conf.CELERY_IMPORTS, ('foo', 'bar.foo')) - - def test_set_as_current(self): - current = _state._tls.current_app - try: - app = self.Celery(set_as_current=True) - self.assertIs(_state._tls.current_app, app) - finally: - _state._tls.current_app = current - - def test_current_task(self): - @self.app.task - def foo(shared=False): - pass - - _state._task_stack.push(foo) - try: - self.assertEqual(self.app.current_task.name, foo.name) - finally: - _state._task_stack.pop() - - def test_task_not_shared(self): - with patch('celery.app.base.connect_on_app_finalize') as sh: - @self.app.task(shared=False) - def foo(): - pass - self.assertFalse(sh.called) - - def test_task_compat_with_filter(self): - with self.Celery(accept_magic_kwargs=True) as app: - check = Mock() - - def filter(task): - 
check(task) - return task - - @app.task(filter=filter, shared=False) - def foo(): - pass - check.assert_called_with(foo) - - def test_task_with_filter(self): - with self.Celery(accept_magic_kwargs=False) as app: - check = Mock() - - def filter(task): - check(task) - return task - - assert not _appbase._EXECV - - @app.task(filter=filter, shared=False) - def foo(): - pass - check.assert_called_with(foo) - - def test_task_sets_main_name_MP_MAIN_FILE(self): - from celery import utils as _utils - _utils.MP_MAIN_FILE = __file__ - try: - with self.Celery('xuzzy') as app: - - @app.task - def foo(): - pass - - self.assertEqual(foo.name, 'xuzzy.foo') - finally: - _utils.MP_MAIN_FILE = None - - def test_annotate_decorator(self): - from celery.app.task import Task - - class adX(Task): - abstract = True - - def run(self, y, z, x): - return y, z, x - - check = Mock() - - def deco(fun): - - def _inner(*args, **kwargs): - check(*args, **kwargs) - return fun(*args, **kwargs) - return _inner - - self.app.conf.CELERY_ANNOTATIONS = { - adX.name: {'@__call__': deco} - } - adX.bind(self.app) - self.assertIs(adX.app, self.app) - - i = adX() - i(2, 4, x=3) - check.assert_called_with(i, 2, 4, x=3) - - i.annotate() - i.annotate() - - def test_apply_async_has__self__(self): - @self.app.task(__self__='hello', shared=False) - def aawsX(): - pass - - with patch('celery.app.amqp.TaskProducer.publish_task') as dt: - aawsX.apply_async((4, 5)) - args = dt.call_args[0][1] - self.assertEqual(args, ('hello', 4, 5)) - - def test_apply_async_adds_children(self): - from celery._state import _task_stack - - @self.app.task(shared=False) - def a3cX1(self): - pass - - @self.app.task(shared=False) - def a3cX2(self): - pass - - _task_stack.push(a3cX1) - try: - a3cX1.push_request(called_directly=False) - try: - res = a3cX2.apply_async(add_to_parent=True) - self.assertIn(res, a3cX1.request.children) - finally: - a3cX1.pop_request() - finally: - _task_stack.pop() - - def test_pickle_app(self): - changes = dict(THE_FOO_BAR='bars', - THE_MII_MAR='jars') - self.app.conf.update(changes) - saved = pickle.dumps(self.app) - self.assertLess(len(saved), 2048) - restored = pickle.loads(saved) - self.assertDictContainsSubset(changes, restored.conf) - - def test_worker_main(self): - from celery.bin import worker as worker_bin - - class worker(worker_bin.worker): - - def execute_from_commandline(self, argv): - return argv - - prev, worker_bin.worker = worker_bin.worker, worker - try: - ret = self.app.worker_main(argv=['--version']) - self.assertListEqual(ret, ['--version']) - finally: - worker_bin.worker = prev - - def test_config_from_envvar(self): - os.environ['CELERYTEST_CONFIG_OBJECT'] = 'celery.tests.app.test_app' - self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT') - self.assertEqual(self.app.conf.THIS_IS_A_KEY, 'this is a value') - - def assert_config2(self): - self.assertTrue(self.app.conf.LEAVE_FOR_WORK) - self.assertTrue(self.app.conf.MOMENT_TO_STOP) - self.assertEqual(self.app.conf.CALL_ME_BACK, 123456789) - self.assertFalse(self.app.conf.WANT_ME_TO) - self.assertTrue(self.app.conf.UNDERSTAND_ME) - - def test_config_from_object__lazy(self): - conf = ObjectConfig2() - self.app.config_from_object(conf) - self.assertFalse(self.app.loader._conf) - self.assertIs(self.app._config_source, conf) - - self.assert_config2() - - def test_config_from_object__force(self): - self.app.config_from_object(ObjectConfig2(), force=True) - self.assertTrue(self.app.loader._conf) - - self.assert_config2() - - def test_config_from_cmdline(self): - cmdline = 
['.always_eager=no', - '.result_backend=/dev/null', - 'celeryd.prefetch_multiplier=368', - '.foobarstring=(string)300', - '.foobarint=(int)300', - '.result_engine_options=(dict){"foo": "bar"}'] - self.app.config_from_cmdline(cmdline, namespace='celery') - self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER) - self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, '/dev/null') - self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368) - self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, '300') - self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300) - self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS, - {'foo': 'bar'}) - - def test_compat_setting_CELERY_BACKEND(self): - self.app._preconf = {} # removes result backend set by AppCase - self.app.config_from_object(Object(CELERY_BACKEND='set_by_us')) - self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, 'set_by_us') - - def test_setting_BROKER_TRANSPORT_OPTIONS(self): - - _args = {'foo': 'bar', 'spam': 'baz'} - - self.app.config_from_object(Object()) - self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, {}) - - self.app.config_from_object(Object(BROKER_TRANSPORT_OPTIONS=_args)) - self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, _args) - - def test_Windows_log_color_disabled(self): - self.app.IS_WINDOWS = True - self.assertFalse(self.app.log.supports_color(True)) - - def test_compat_setting_CARROT_BACKEND(self): - self.app.config_from_object(Object(CARROT_BACKEND='set_by_us')) - self.assertEqual(self.app.conf.BROKER_TRANSPORT, 'set_by_us') - - def test_WorkController(self): - x = self.app.WorkController - self.assertIs(x.app, self.app) - - def test_Worker(self): - x = self.app.Worker - self.assertIs(x.app, self.app) - - @depends_on_current_app - def test_AsyncResult(self): - x = self.app.AsyncResult('1') - self.assertIs(x.app, self.app) - r = loads(dumps(x)) - # not set as current, so ends up as default app after reduce - self.assertIs(r.app, current_app._get_current_object()) - - def test_get_active_apps(self): - self.assertTrue(list(_state._get_active_apps())) - - app1 = self.Celery() - appid = id(app1) - self.assertIn(app1, _state._get_active_apps()) - app1.close() - del(app1) - - gc.collect() - - # weakref removed from list when app goes out of scope. 
- with self.assertRaises(StopIteration): - next(app for app in _state._get_active_apps() if id(app) == appid) - - def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): - self.assertFalse( - self.app.config_from_envvar( - 'HDSAJIHWIQHEWQU', force=True, silent=True), - ) - with self.assertRaises(ImproperlyConfigured): - self.app.config_from_envvar( - 'HDSAJIHWIQHEWQU', force=True, silent=False, - ) - os.environ[key] = __name__ + '.object_config' - self.assertTrue(self.app.config_from_envvar(key, force=True)) - self.assertEqual(self.app.conf['FOO'], 1) - self.assertEqual(self.app.conf['BAR'], 2) - - os.environ[key] = 'unknown_asdwqe.asdwqewqe' - with self.assertRaises(ImportError): - self.app.config_from_envvar(key, silent=False) - self.assertFalse( - self.app.config_from_envvar(key, force=True, silent=True), - ) - - os.environ[key] = __name__ + '.dict_config' - self.assertTrue(self.app.config_from_envvar(key, force=True)) - self.assertEqual(self.app.conf['FOO'], 10) - self.assertEqual(self.app.conf['BAR'], 20) - - @patch('celery.bin.celery.CeleryCommand.execute_from_commandline') - def test_start(self, execute): - self.app.start() - self.assertTrue(execute.called) - - def test_mail_admins(self): - - class Loader(BaseLoader): - - def mail_admins(*args, **kwargs): - return args, kwargs - - self.app.loader = Loader(app=self.app) - self.app.conf.ADMINS = None - self.assertFalse(self.app.mail_admins('Subject', 'Body')) - self.app.conf.ADMINS = [('George Costanza', 'george@vandelay.com')] - self.assertTrue(self.app.mail_admins('Subject', 'Body')) - - def test_amqp_get_broker_info(self): - self.assertDictContainsSubset( - {'hostname': 'localhost', - 'userid': 'guest', - 'password': 'guest', - 'virtual_host': '/'}, - self.app.connection('pyamqp://').info(), - ) - self.app.conf.BROKER_PORT = 1978 - self.app.conf.BROKER_VHOST = 'foo' - self.assertDictContainsSubset( - {'port': 1978, 'virtual_host': 'foo'}, - self.app.connection('pyamqp://:1978/foo').info(), - ) - conn = self.app.connection('pyamqp:////value') - self.assertDictContainsSubset({'virtual_host': '/value'}, - conn.info()) - - def test_amqp_failover_strategy_selection(self): - # Test passing in a string and make sure the string - # gets there untouched - self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar' - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - 'foo-bar', - ) - - # Try passing in None - self.app.conf.BROKER_FAILOVER_STRATEGY = None - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - itertools.cycle, - ) - - # Test passing in a method - def my_failover_strategy(it): - yield True - - self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy - self.assertEqual( - self.app.connection('amqp:////value').failover_strategy, - my_failover_strategy, - ) - - def test_BROKER_BACKEND_alias(self): - self.assertEqual(self.app.conf.BROKER_BACKEND, - self.app.conf.BROKER_TRANSPORT) - - def test_after_fork(self): - p = self.app._pool = Mock() - self.app._after_fork(self.app) - p.force_close_all.assert_called_with() - self.assertIsNone(self.app._pool) - self.app._after_fork(self.app) - - def test_pool_no_multiprocessing(self): - with mask_modules('multiprocessing.util'): - pool = self.app.pool - self.assertIs(pool, self.app._pool) - - def test_bugreport(self): - self.assertTrue(self.app.bugreport()) - - def test_send_task_sent_event(self): - - class Dispatcher(object): - sent = [] - - def publish(self, type, fields, *args, **kwargs): - self.sent.append((type, fields)) - - 
conn = self.app.connection() - chan = conn.channel() - try: - for e in ('foo_exchange', 'moo_exchange', 'bar_exchange'): - chan.exchange_declare(e, 'direct', durable=True) - chan.queue_declare(e, durable=True) - chan.queue_bind(e, e, e) - finally: - chan.close() - assert conn.transport_cls == 'memory' - - prod = self.app.amqp.TaskProducer( - conn, exchange=Exchange('foo_exchange'), - send_sent_event=True, - ) - - dispatcher = Dispatcher() - self.assertTrue(prod.publish_task('footask', (), {}, - exchange='moo_exchange', - routing_key='moo_exchange', - event_dispatcher=dispatcher)) - self.assertTrue(dispatcher.sent) - self.assertEqual(dispatcher.sent[0][0], 'task-sent') - self.assertTrue(prod.publish_task('footask', (), {}, - event_dispatcher=dispatcher, - exchange='bar_exchange', - routing_key='bar_exchange')) - - def test_error_mail_sender(self): - x = ErrorMail.subject % {'name': 'task_name', - 'id': uuid(), - 'exc': 'FOOBARBAZ', - 'hostname': 'lana'} - self.assertTrue(x) - - def test_error_mail_disabled(self): - task = Mock() - x = ErrorMail(task) - x.should_send = Mock() - x.should_send.return_value = False - x.send(Mock(), Mock()) - self.assertFalse(task.app.mail_admins.called) - - -class test_defaults(AppCase): - - def test_strtobool(self): - for s in ('false', 'no', '0'): - self.assertFalse(defaults.strtobool(s)) - for s in ('true', 'yes', '1'): - self.assertTrue(defaults.strtobool(s)) - with self.assertRaises(TypeError): - defaults.strtobool('unsure') - - -class test_debugging_utils(AppCase): - - def test_enable_disable_trace(self): - try: - _app.enable_trace() - self.assertEqual(_app.app_or_default, _app._app_or_default_trace) - _app.disable_trace() - self.assertEqual(_app.app_or_default, _app._app_or_default) - finally: - _app.disable_trace() - - -class test_pyimplementation(AppCase): - - def test_platform_python_implementation(self): - with platform_pyimp(lambda: 'Xython'): - self.assertEqual(pyimplementation(), 'Xython') - - def test_platform_jython(self): - with platform_pyimp(): - with sys_platform('java 1.6.51'): - self.assertIn('Jython', pyimplementation()) - - def test_platform_pypy(self): - with platform_pyimp(): - with sys_platform('darwin'): - with pypy_version((1, 4, 3)): - self.assertIn('PyPy', pyimplementation()) - with pypy_version((1, 4, 3, 'a4')): - self.assertIn('PyPy', pyimplementation()) - - def test_platform_fallback(self): - with platform_pyimp(): - with sys_platform('darwin'): - with pypy_version(): - self.assertEqual('CPython', pyimplementation()) - - -class test_shared_task(AppCase): - - def test_registers_to_all_apps(self): - with self.Celery('xproj', set_as_current=True) as xproj: - xproj.finalize() - - @shared_task - def foo(): - return 42 - - @shared_task() - def bar(): - return 84 - - self.assertIs(foo.app, xproj) - self.assertIs(bar.app, xproj) - self.assertTrue(foo._get_current_object()) - - with self.Celery('yproj', set_as_current=True) as yproj: - self.assertIs(foo.app, yproj) - self.assertIs(bar.app, yproj) - - @shared_task() - def baz(): - return 168 - - self.assertIs(baz.app, yproj) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py deleted file mode 100644 index 67e4f53..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py +++ /dev/null @@ -1,539 +0,0 @@ -from __future__ import absolute_import - -import errno - -from datetime import datetime, timedelta -from pickle import dumps, loads - -from celery import beat -from 
celery.five import keys, string_t -from celery.schedules import schedule -from celery.utils import uuid -from celery.tests.case import AppCase, Mock, SkipTest, call, patch - - -class Object(object): - pass - - -class MockShelve(dict): - closed = False - synced = False - - def close(self): - self.closed = True - - def sync(self): - self.synced = True - - -class MockService(object): - started = False - stopped = False - - def __init__(self, *args, **kwargs): - pass - - def start(self, **kwargs): - self.started = True - - def stop(self, **kwargs): - self.stopped = True - - -class test_ScheduleEntry(AppCase): - Entry = beat.ScheduleEntry - - def create_entry(self, **kwargs): - entry = dict( - name='celery.unittest.add', - schedule=timedelta(seconds=10), - args=(2, 2), - options={'routing_key': 'cpu'}, - app=self.app, - ) - return self.Entry(**dict(entry, **kwargs)) - - def test_next(self): - entry = self.create_entry(schedule=10) - self.assertTrue(entry.last_run_at) - self.assertIsInstance(entry.last_run_at, datetime) - self.assertEqual(entry.total_run_count, 0) - - next_run_at = entry.last_run_at + timedelta(seconds=10) - next_entry = entry.next(next_run_at) - self.assertGreaterEqual(next_entry.last_run_at, next_run_at) - self.assertEqual(next_entry.total_run_count, 1) - - def test_is_due(self): - entry = self.create_entry(schedule=timedelta(seconds=10)) - self.assertIs(entry.app, self.app) - self.assertIs(entry.schedule.app, self.app) - due1, next_time_to_run1 = entry.is_due() - self.assertFalse(due1) - self.assertGreater(next_time_to_run1, 9) - - next_run_at = entry.last_run_at - timedelta(seconds=10) - next_entry = entry.next(next_run_at) - due2, next_time_to_run2 = next_entry.is_due() - self.assertTrue(due2) - self.assertGreater(next_time_to_run2, 9) - - def test_repr(self): - entry = self.create_entry() - self.assertIn(' 1: - return s.sh - raise OSError() - opens.side_effect = effect - s.setup_schedule() - s._remove_db.assert_called_with() - - s._store = {'__version__': 1} - s.setup_schedule() - - s._store.clear = Mock() - op = s.persistence.open = Mock() - op.return_value = s._store - s._store['tz'] = 'FUNKY' - s.setup_schedule() - op.assert_called_with(s.schedule_filename, writeback=True) - s._store.clear.assert_called_with() - s._store['utc_enabled'] = False - s._store.clear = Mock() - s.setup_schedule() - s._store.clear.assert_called_with() - - def test_get_schedule(self): - s = create_persistent_scheduler()[0]( - schedule_filename='schedule', app=self.app, - ) - s._store = {'entries': {}} - s.schedule = {'foo': 'bar'} - self.assertDictEqual(s.schedule, {'foo': 'bar'}) - self.assertDictEqual(s._store['entries'], s.schedule) - - -class test_Service(AppCase): - - def get_service(self): - Scheduler, mock_shelve = create_persistent_scheduler() - return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve - - def test_pickleable(self): - s = beat.Service(app=self.app, scheduler_cls=Mock) - self.assertTrue(loads(dumps(s))) - - def test_start(self): - s, sh = self.get_service() - schedule = s.scheduler.schedule - self.assertIsInstance(schedule, dict) - self.assertIsInstance(s.scheduler, beat.Scheduler) - scheduled = list(schedule.keys()) - for task_name in keys(sh['entries']): - self.assertIn(task_name, scheduled) - - s.sync() - self.assertTrue(sh.closed) - self.assertTrue(sh.synced) - self.assertTrue(s._is_stopped.isSet()) - s.sync() - s.stop(wait=False) - self.assertTrue(s._is_shutdown.isSet()) - s.stop(wait=True) - self.assertTrue(s._is_shutdown.isSet()) - - p = 
s.scheduler._store - s.scheduler._store = None - try: - s.scheduler.sync() - finally: - s.scheduler._store = p - - def test_start_embedded_process(self): - s, sh = self.get_service() - s._is_shutdown.set() - s.start(embedded_process=True) - - def test_start_thread(self): - s, sh = self.get_service() - s._is_shutdown.set() - s.start(embedded_process=False) - - def test_start_tick_raises_exit_error(self): - s, sh = self.get_service() - s.scheduler.tick_raises_exit = True - s.start() - self.assertTrue(s._is_shutdown.isSet()) - - def test_start_manages_one_tick_before_shutdown(self): - s, sh = self.get_service() - s.scheduler.shutdown_service = s - s.start() - self.assertTrue(s._is_shutdown.isSet()) - - -class test_EmbeddedService(AppCase): - - def test_start_stop_process(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('multiprocessing not available') - - from billiard.process import Process - - s = beat.EmbeddedService(self.app) - self.assertIsInstance(s, Process) - self.assertIsInstance(s.service, beat.Service) - s.service = MockService() - - class _Popen(object): - terminated = False - - def terminate(self): - self.terminated = True - - with patch('celery.platforms.close_open_fds'): - s.run() - self.assertTrue(s.service.started) - - s._popen = _Popen() - s.stop() - self.assertTrue(s.service.stopped) - self.assertTrue(s._popen.terminated) - - def test_start_stop_threaded(self): - s = beat.EmbeddedService(self.app, thread=True) - from threading import Thread - self.assertIsInstance(s, Thread) - self.assertIsInstance(s.service, beat.Service) - s.service = MockService() - - s.run() - self.assertTrue(s.service.started) - - s.stop() - self.assertTrue(s.service.stopped) - - -class test_schedule(AppCase): - - def test_maybe_make_aware(self): - x = schedule(10, app=self.app) - x.utc_enabled = True - d = x.maybe_make_aware(datetime.utcnow()) - self.assertTrue(d.tzinfo) - x.utc_enabled = False - d2 = x.maybe_make_aware(datetime.utcnow()) - self.assertIsNone(d2.tzinfo) - - def test_to_local(self): - x = schedule(10, app=self.app) - x.utc_enabled = True - d = x.to_local(datetime.utcnow()) - self.assertIsNone(d.tzinfo) - x.utc_enabled = False - d = x.to_local(datetime.utcnow()) - self.assertTrue(d.tzinfo) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py deleted file mode 100644 index 0d04a52..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import absolute_import - -from celery import group, chord -from celery.app import builtins -from celery.canvas import Signature -from celery.five import range -from celery._state import _task_stack -from celery.tests.case import AppCase, Mock, patch - - -class BuiltinsCase(AppCase): - - def setup(self): - @self.app.task(shared=False) - def xsum(x): - return sum(x) - self.xsum = xsum - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - -class test_backend_cleanup(BuiltinsCase): - - def test_run(self): - self.app.backend.cleanup = Mock() - self.app.backend.cleanup.__name__ = 'cleanup' - cleanup_task = builtins.add_backend_cleanup_task(self.app) - cleanup_task() - self.assertTrue(self.app.backend.cleanup.called) - - -class test_map(BuiltinsCase): - - def test_run(self): - - @self.app.task(shared=False) - def map_mul(x): - return x[0] * x[1] - - res = self.app.tasks['celery.map']( - map_mul, [(2, 2), (4, 4), 
(8, 8)], - ) - self.assertEqual(res, [4, 16, 64]) - - -class test_starmap(BuiltinsCase): - - def test_run(self): - - @self.app.task(shared=False) - def smap_mul(x, y): - return x * y - - res = self.app.tasks['celery.starmap']( - smap_mul, [(2, 2), (4, 4), (8, 8)], - ) - self.assertEqual(res, [4, 16, 64]) - - -class test_chunks(BuiltinsCase): - - @patch('celery.canvas.chunks.apply_chunks') - def test_run(self, apply_chunks): - - @self.app.task(shared=False) - def chunks_mul(l): - return l - - self.app.tasks['celery.chunks']( - chunks_mul, [(2, 2), (4, 4), (8, 8)], 1, - ) - self.assertTrue(apply_chunks.called) - - -class test_group(BuiltinsCase): - - def setup(self): - self.task = builtins.add_group_task(self.app)() - super(test_group, self).setup() - - def test_apply_async_eager(self): - self.task.apply = Mock() - self.app.conf.CELERY_ALWAYS_EAGER = True - self.task.apply_async() - self.assertTrue(self.task.apply.called) - - def test_apply(self): - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - x.name = self.task.name - res = x.apply() - self.assertEqual(res.get(), [8, 16]) - - def test_apply_async(self): - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - x.apply_async() - - def test_apply_empty(self): - x = group(app=self.app) - x.apply() - res = x.apply_async() - self.assertTrue(res) - self.assertFalse(res.results) - - def test_apply_async_with_parent(self): - _task_stack.push(self.add) - try: - self.add.push_request(called_directly=False) - try: - assert not self.add.request.children - x = group([self.add.s(4, 4), self.add.s(8, 8)]) - res = x() - self.assertTrue(self.add.request.children) - self.assertIn(res, self.add.request.children) - self.assertEqual(len(self.add.request.children), 1) - finally: - self.add.pop_request() - finally: - _task_stack.pop() - - -class test_chain(BuiltinsCase): - - def setup(self): - BuiltinsCase.setup(self) - self.task = builtins.add_chain_task(self.app)() - - def test_apply_async(self): - c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) - result = c.apply_async() - self.assertTrue(result.parent) - self.assertTrue(result.parent.parent) - self.assertIsNone(result.parent.parent.parent) - - def test_group_to_chord(self): - c = ( - group(self.add.s(i, i) for i in range(5)) | - self.add.s(10) | - self.add.s(20) | - self.add.s(30) - ) - tasks, _ = c.type.prepare_steps((), c.tasks) - self.assertIsInstance(tasks[0], chord) - self.assertTrue(tasks[0].body.options['link']) - self.assertTrue(tasks[0].body.options['link'][0].options['link']) - - c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) - tasks2, _ = c2.type.prepare_steps((), c2.tasks) - self.assertIsInstance(tasks2[1], group) - - def test_apply_options(self): - - class static(Signature): - - def clone(self, *args, **kwargs): - return self - - def s(*args, **kwargs): - return static(self.add, args, kwargs, type=self.add) - - c = s(2, 2) | s(4, 4) | s(8, 8) - r1 = c.apply_async(task_id='some_id') - self.assertEqual(r1.id, 'some_id') - - c.apply_async(group_id='some_group_id') - self.assertEqual(c.tasks[-1].options['group_id'], 'some_group_id') - - c.apply_async(chord='some_chord_id') - self.assertEqual(c.tasks[-1].options['chord'], 'some_chord_id') - - c.apply_async(link=[s(32)]) - self.assertListEqual(c.tasks[-1].options['link'], [s(32)]) - - c.apply_async(link_error=[s('error')]) - for task in c.tasks: - self.assertListEqual(task.options['link_error'], [s('error')]) - - -class test_chord(BuiltinsCase): - - def setup(self): - self.task = builtins.add_chord_task(self.app)() - 
super(test_chord, self).setup() - - def test_apply_async(self): - x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) - r = x.apply_async() - self.assertTrue(r) - self.assertTrue(r.parent) - - def test_run_header_not_group(self): - self.task([self.add.s(i, i) for i in range(10)], self.xsum.s()) - - def test_forward_options(self): - body = self.xsum.s() - x = chord([self.add.s(i, i) for i in range(10)], body=body) - x._type = Mock() - x._type.app.conf.CELERY_ALWAYS_EAGER = False - x.apply_async(group_id='some_group_id') - self.assertTrue(x._type.called) - resbody = x._type.call_args[0][1] - self.assertEqual(resbody.options['group_id'], 'some_group_id') - x2 = chord([self.add.s(i, i) for i in range(10)], body=body) - x2._type = Mock() - x2._type.app.conf.CELERY_ALWAYS_EAGER = False - x2.apply_async(chord='some_chord_id') - self.assertTrue(x2._type.called) - resbody = x2._type.call_args[0][1] - self.assertEqual(resbody.options['chord'], 'some_chord_id') - - def test_apply_eager(self): - self.app.conf.CELERY_ALWAYS_EAGER = True - x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) - r = x.apply_async() - self.assertEqual(r.get(), 90) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py deleted file mode 100644 index 5088d35..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import absolute_import -from celery.tests.case import AppCase - -import celery - - -class test_celery_package(AppCase): - - def test_version(self): - self.assertTrue(celery.VERSION) - self.assertGreaterEqual(len(celery.VERSION), 3) - celery.VERSION = (0, 3, 0) - self.assertGreaterEqual(celery.__version__.count('.'), 2) - - def test_meta(self): - for m in ('__author__', '__contact__', '__homepage__', - '__docformat__'): - self.assertTrue(getattr(celery, m, None)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py deleted file mode 100644 index 7a05506..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py +++ /dev/null @@ -1,251 +0,0 @@ -from __future__ import absolute_import - -from functools import wraps - -from kombu.pidbox import Mailbox - -from celery.app import control -from celery.exceptions import DuplicateNodenameWarning -from celery.utils import uuid -from celery.tests.case import AppCase - - -class MockMailbox(Mailbox): - sent = [] - - def _publish(self, command, *args, **kwargs): - self.__class__.sent.append(command) - - def close(self): - pass - - def _collect(self, *args, **kwargs): - pass - - -class Control(control.Control): - Mailbox = MockMailbox - - -def with_mock_broadcast(fun): - - @wraps(fun) - def _resets(*args, **kwargs): - MockMailbox.sent = [] - try: - return fun(*args, **kwargs) - finally: - MockMailbox.sent = [] - return _resets - - -class test_flatten_reply(AppCase): - - def test_flatten_reply(self): - reply = [ - {'foo@example.com': {'hello': 10}}, - {'foo@example.com': {'hello': 20}}, - {'bar@example.com': {'hello': 30}} - ] - with self.assertWarns(DuplicateNodenameWarning) as w: - nodes = control.flatten_reply(reply) - - self.assertIn( - 'Received multiple replies from node name: foo@example.com.', - str(w.warning) - ) - self.assertIn('foo@example.com', nodes) - self.assertIn('bar@example.com', nodes) - - -class test_inspect(AppCase): - - def 
setup(self): - self.c = Control(app=self.app) - self.prev, self.app.control = self.app.control, self.c - self.i = self.c.inspect() - - def test_prepare_reply(self): - self.assertDictEqual(self.i._prepare([{'w1': {'ok': 1}}, - {'w2': {'ok': 1}}]), - {'w1': {'ok': 1}, 'w2': {'ok': 1}}) - - i = self.c.inspect(destination='w1') - self.assertEqual(i._prepare([{'w1': {'ok': 1}}]), - {'ok': 1}) - - @with_mock_broadcast - def test_active(self): - self.i.active() - self.assertIn('dump_active', MockMailbox.sent) - - @with_mock_broadcast - def test_clock(self): - self.i.clock() - self.assertIn('clock', MockMailbox.sent) - - @with_mock_broadcast - def test_conf(self): - self.i.conf() - self.assertIn('dump_conf', MockMailbox.sent) - - @with_mock_broadcast - def test_hello(self): - self.i.hello('george@vandelay.com') - self.assertIn('hello', MockMailbox.sent) - - @with_mock_broadcast - def test_memsample(self): - self.i.memsample() - self.assertIn('memsample', MockMailbox.sent) - - @with_mock_broadcast - def test_memdump(self): - self.i.memdump() - self.assertIn('memdump', MockMailbox.sent) - - @with_mock_broadcast - def test_objgraph(self): - self.i.objgraph() - self.assertIn('objgraph', MockMailbox.sent) - - @with_mock_broadcast - def test_scheduled(self): - self.i.scheduled() - self.assertIn('dump_schedule', MockMailbox.sent) - - @with_mock_broadcast - def test_reserved(self): - self.i.reserved() - self.assertIn('dump_reserved', MockMailbox.sent) - - @with_mock_broadcast - def test_stats(self): - self.i.stats() - self.assertIn('stats', MockMailbox.sent) - - @with_mock_broadcast - def test_revoked(self): - self.i.revoked() - self.assertIn('dump_revoked', MockMailbox.sent) - - @with_mock_broadcast - def test_tasks(self): - self.i.registered() - self.assertIn('dump_tasks', MockMailbox.sent) - - @with_mock_broadcast - def test_ping(self): - self.i.ping() - self.assertIn('ping', MockMailbox.sent) - - @with_mock_broadcast - def test_active_queues(self): - self.i.active_queues() - self.assertIn('active_queues', MockMailbox.sent) - - @with_mock_broadcast - def test_report(self): - self.i.report() - self.assertIn('report', MockMailbox.sent) - - -class test_Broadcast(AppCase): - - def setup(self): - self.control = Control(app=self.app) - self.app.control = self.control - - @self.app.task(shared=False) - def mytask(): - pass - self.mytask = mytask - - def test_purge(self): - self.control.purge() - - @with_mock_broadcast - def test_broadcast(self): - self.control.broadcast('foobarbaz', arguments=[]) - self.assertIn('foobarbaz', MockMailbox.sent) - - @with_mock_broadcast - def test_broadcast_limit(self): - self.control.broadcast( - 'foobarbaz1', arguments=[], limit=None, destination=[1, 2, 3], - ) - self.assertIn('foobarbaz1', MockMailbox.sent) - - @with_mock_broadcast - def test_broadcast_validate(self): - with self.assertRaises(ValueError): - self.control.broadcast('foobarbaz2', - destination='foo') - - @with_mock_broadcast - def test_rate_limit(self): - self.control.rate_limit(self.mytask.name, '100/m') - self.assertIn('rate_limit', MockMailbox.sent) - - @with_mock_broadcast - def test_time_limit(self): - self.control.time_limit(self.mytask.name, soft=10, hard=20) - self.assertIn('time_limit', MockMailbox.sent) - - @with_mock_broadcast - def test_add_consumer(self): - self.control.add_consumer('foo') - self.assertIn('add_consumer', MockMailbox.sent) - - @with_mock_broadcast - def test_cancel_consumer(self): - self.control.cancel_consumer('foo') - self.assertIn('cancel_consumer', MockMailbox.sent) - - 
@with_mock_broadcast - def test_enable_events(self): - self.control.enable_events() - self.assertIn('enable_events', MockMailbox.sent) - - @with_mock_broadcast - def test_disable_events(self): - self.control.disable_events() - self.assertIn('disable_events', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke(self): - self.control.revoke('foozbaaz') - self.assertIn('revoke', MockMailbox.sent) - - @with_mock_broadcast - def test_ping(self): - self.control.ping() - self.assertIn('ping', MockMailbox.sent) - - @with_mock_broadcast - def test_election(self): - self.control.election('some_id', 'topic', 'action') - self.assertIn('election', MockMailbox.sent) - - @with_mock_broadcast - def test_pool_grow(self): - self.control.pool_grow(2) - self.assertIn('pool_grow', MockMailbox.sent) - - @with_mock_broadcast - def test_pool_shrink(self): - self.control.pool_shrink(2) - self.assertIn('pool_shrink', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke_from_result(self): - self.app.AsyncResult('foozbazzbar').revoke() - self.assertIn('revoke', MockMailbox.sent) - - @with_mock_broadcast - def test_revoke_from_resultset(self): - r = self.app.GroupResult(uuid(), - [self.app.AsyncResult(x) - for x in [uuid() for i in range(10)]]) - r.revoke() - self.assertIn('revoke', MockMailbox.sent) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py deleted file mode 100644 index bf87f80..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import absolute_import - -import sys - -from importlib import import_module - -from celery.app.defaults import NAMESPACES - -from celery.tests.case import ( - AppCase, Mock, patch, pypy_version, sys_platform, -) - - -class test_defaults(AppCase): - - def setup(self): - self._prev = sys.modules.pop('celery.app.defaults', None) - - def teardown(self): - if self._prev: - sys.modules['celery.app.defaults'] = self._prev - - def test_option_repr(self): - self.assertTrue(repr(NAMESPACES['BROKER']['URL'])) - - def test_any(self): - val = object() - self.assertIs(self.defaults.Option.typemap['any'](val), val) - - def test_default_pool_pypy_14(self): - with sys_platform('darwin'): - with pypy_version((1, 4, 0)): - self.assertEqual(self.defaults.DEFAULT_POOL, 'solo') - - def test_default_pool_pypy_15(self): - with sys_platform('darwin'): - with pypy_version((1, 5, 0)): - self.assertEqual(self.defaults.DEFAULT_POOL, 'prefork') - - def test_deprecated(self): - source = Mock() - source.CELERYD_LOG_LEVEL = 2 - with patch('celery.utils.warn_deprecated') as warn: - self.defaults.find_deprecated_settings(source) - self.assertTrue(warn.called) - - def test_default_pool_jython(self): - with sys_platform('java 1.6.51'): - self.assertEqual(self.defaults.DEFAULT_POOL, 'threads') - - def test_find(self): - find = self.defaults.find - - self.assertEqual(find('server_email')[2].default, 'celery@localhost') - self.assertEqual(find('default_queue')[2].default, 'celery') - self.assertEqual(find('celery_default_exchange')[2], 'celery') - - @property - def defaults(self): - return import_module('celery.app.defaults') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py deleted file mode 100644 index 25d2b4e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py +++ 
/dev/null @@ -1,35 +0,0 @@ -from __future__ import absolute_import - -import pickle - -from datetime import datetime - -from celery.exceptions import Reject, Retry - -from celery.tests.case import AppCase - - -class test_Retry(AppCase): - - def test_when_datetime(self): - x = Retry('foo', KeyError(), when=datetime.utcnow()) - self.assertTrue(x.humanize()) - - def test_pickleable(self): - x = Retry('foo', KeyError(), when=datetime.utcnow()) - self.assertTrue(pickle.loads(pickle.dumps(x))) - - -class test_Reject(AppCase): - - def test_attrs(self): - x = Reject('foo', requeue=True) - self.assertEqual(x.reason, 'foo') - self.assertTrue(x.requeue) - - def test_repr(self): - self.assertTrue(repr(Reject('foo', True))) - - def test_pickleable(self): - x = Retry('foo', True) - self.assertTrue(pickle.loads(pickle.dumps(x))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py deleted file mode 100644 index cc9fb55..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py +++ /dev/null @@ -1,275 +0,0 @@ -from __future__ import absolute_import - -import os -import sys -import warnings - -from celery import loaders -from celery.exceptions import ( - NotConfigured, -) -from celery.loaders import base -from celery.loaders import default -from celery.loaders.app import AppLoader -from celery.utils.imports import NotAPackage -from celery.utils.mail import SendmailWarning - -from celery.tests.case import ( - AppCase, Case, Mock, depends_on_current_app, patch, with_environ, -) - - -class DummyLoader(base.BaseLoader): - - def read_configuration(self): - return {'foo': 'bar', 'CELERY_IMPORTS': ('os', 'sys')} - - -class test_loaders(AppCase): - - def test_get_loader_cls(self): - self.assertEqual(loaders.get_loader_cls('default'), - default.Loader) - - @depends_on_current_app - def test_current_loader(self): - with self.assertPendingDeprecation(): - self.assertIs(loaders.current_loader(), self.app.loader) - - @depends_on_current_app - def test_load_settings(self): - with self.assertPendingDeprecation(): - self.assertIs(loaders.load_settings(), self.app.conf) - - -class test_LoaderBase(AppCase): - message_options = {'subject': 'Subject', - 'body': 'Body', - 'sender': 'x@x.com', - 'to': 'y@x.com'} - server_options = {'host': 'smtp.x.com', - 'port': 1234, - 'user': 'x', - 'password': 'qwerty', - 'timeout': 3} - - def setup(self): - self.loader = DummyLoader(app=self.app) - - def test_handlers_pass(self): - self.loader.on_task_init('foo.task', 'feedface-cafebabe') - self.loader.on_worker_init() - - def test_now(self): - self.assertTrue(self.loader.now(utc=True)) - self.assertTrue(self.loader.now(utc=False)) - - def test_read_configuration_no_env(self): - self.assertDictEqual( - base.BaseLoader(app=self.app).read_configuration( - 'FOO_X_S_WE_WQ_Q_WE'), - {}, - ) - - def test_autodiscovery(self): - with patch('celery.loaders.base.autodiscover_tasks') as auto: - auto.return_value = [Mock()] - auto.return_value[0].__name__ = 'moo' - self.loader.autodiscover_tasks(['A', 'B']) - self.assertIn('moo', self.loader.task_modules) - self.loader.task_modules.discard('moo') - - def test_import_task_module(self): - self.assertEqual(sys, self.loader.import_task_module('sys')) - - def test_init_worker_process(self): - self.loader.on_worker_process_init() - m = self.loader.on_worker_process_init = Mock() - self.loader.init_worker_process() - m.assert_called_with() - - def 
test_config_from_object_module(self): - self.loader.import_from_cwd = Mock() - self.loader.config_from_object('module_name') - self.loader.import_from_cwd.assert_called_with('module_name') - - def test_conf_property(self): - self.assertEqual(self.loader.conf['foo'], 'bar') - self.assertEqual(self.loader._conf['foo'], 'bar') - self.assertEqual(self.loader.conf['foo'], 'bar') - - def test_import_default_modules(self): - def modnames(l): - return [m.__name__ for m in l] - self.app.conf.CELERY_IMPORTS = ('os', 'sys') - self.assertEqual( - sorted(modnames(self.loader.import_default_modules())), - sorted(modnames([os, sys])), - ) - - def test_import_from_cwd_custom_imp(self): - - def imp(module, package=None): - imp.called = True - imp.called = False - - self.loader.import_from_cwd('foo', imp=imp) - self.assertTrue(imp.called) - - @patch('celery.utils.mail.Mailer._send') - def test_mail_admins_errors(self, send): - send.side_effect = KeyError() - opts = dict(self.message_options, **self.server_options) - - with self.assertWarnsRegex(SendmailWarning, r'KeyError'): - self.loader.mail_admins(fail_silently=True, **opts) - - with self.assertRaises(KeyError): - self.loader.mail_admins(fail_silently=False, **opts) - - @patch('celery.utils.mail.Mailer._send') - def test_mail_admins(self, send): - opts = dict(self.message_options, **self.server_options) - self.loader.mail_admins(**opts) - self.assertTrue(send.call_args) - message = send.call_args[0][0] - self.assertEqual(message.to, [self.message_options['to']]) - self.assertEqual(message.subject, self.message_options['subject']) - self.assertEqual(message.sender, self.message_options['sender']) - self.assertEqual(message.body, self.message_options['body']) - - def test_mail_attribute(self): - from celery.utils import mail - loader = base.BaseLoader(app=self.app) - self.assertIs(loader.mail, mail) - - def test_cmdline_config_ValueError(self): - with self.assertRaises(ValueError): - self.loader.cmdline_config_parser(['broker.port=foobar']) - - -class test_DefaultLoader(AppCase): - - @patch('celery.loaders.base.find_module') - def test_read_configuration_not_a_package(self, find_module): - find_module.side_effect = NotAPackage() - l = default.Loader(app=self.app) - with self.assertRaises(NotAPackage): - l.read_configuration(fail_silently=False) - - @patch('celery.loaders.base.find_module') - @with_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py') - def test_read_configuration_py_in_name(self, find_module): - find_module.side_effect = NotAPackage() - l = default.Loader(app=self.app) - with self.assertRaises(NotAPackage): - l.read_configuration(fail_silently=False) - - @patch('celery.loaders.base.find_module') - def test_read_configuration_importerror(self, find_module): - default.C_WNOCONF = True - find_module.side_effect = ImportError() - l = default.Loader(app=self.app) - with self.assertWarnsRegex(NotConfigured, r'make sure it exists'): - l.read_configuration(fail_silently=True) - default.C_WNOCONF = False - l.read_configuration(fail_silently=True) - - def test_read_configuration(self): - from types import ModuleType - - class ConfigModule(ModuleType): - pass - - configname = os.environ.get('CELERY_CONFIG_MODULE') or 'celeryconfig' - celeryconfig = ConfigModule(configname) - celeryconfig.CELERY_IMPORTS = ('os', 'sys') - - prevconfig = sys.modules.get(configname) - sys.modules[configname] = celeryconfig - try: - l = default.Loader(app=self.app) - l.find_module = Mock(name='find_module') - settings = l.read_configuration(fail_silently=False) - 
self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) - settings = l.read_configuration(fail_silently=False) - self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) - l.on_worker_init() - finally: - if prevconfig: - sys.modules[configname] = prevconfig - - def test_import_from_cwd(self): - l = default.Loader(app=self.app) - old_path = list(sys.path) - try: - sys.path.remove(os.getcwd()) - except ValueError: - pass - celery = sys.modules.pop('celery', None) - sys.modules.pop('celery.five', None) - try: - self.assertTrue(l.import_from_cwd('celery')) - sys.modules.pop('celery', None) - sys.modules.pop('celery.five', None) - sys.path.insert(0, os.getcwd()) - self.assertTrue(l.import_from_cwd('celery')) - finally: - sys.path = old_path - sys.modules['celery'] = celery - - def test_unconfigured_settings(self): - context_executed = [False] - - class _Loader(default.Loader): - - def find_module(self, name): - raise ImportError(name) - - with warnings.catch_warnings(record=True): - l = _Loader(app=self.app) - self.assertFalse(l.configured) - context_executed[0] = True - self.assertTrue(context_executed[0]) - - -class test_AppLoader(AppCase): - - def setup(self): - self.loader = AppLoader(app=self.app) - - def test_on_worker_init(self): - self.app.conf.CELERY_IMPORTS = ('subprocess', ) - sys.modules.pop('subprocess', None) - self.loader.init_worker() - self.assertIn('subprocess', sys.modules) - - -class test_autodiscovery(Case): - - def test_autodiscover_tasks(self): - base._RACE_PROTECTION = True - try: - base.autodiscover_tasks(['foo']) - finally: - base._RACE_PROTECTION = False - with patch('celery.loaders.base.find_related_module') as frm: - base.autodiscover_tasks(['foo']) - self.assertTrue(frm.called) - - def test_find_related_module(self): - with patch('importlib.import_module') as imp: - with patch('imp.find_module') as find: - imp.return_value = Mock() - imp.return_value.__path__ = 'foo' - base.find_related_module(base, 'tasks') - - def se1(val): - imp.side_effect = AttributeError() - - imp.side_effect = se1 - base.find_related_module(base, 'tasks') - imp.side_effect = None - - find.side_effect = ImportError() - base.find_related_module(base, 'tasks') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py deleted file mode 100644 index 588e39b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py +++ /dev/null @@ -1,385 +0,0 @@ -from __future__ import absolute_import - -import sys -import logging - -from collections import defaultdict -from io import StringIO -from tempfile import mktemp - -from celery import signals -from celery.app.log import TaskFormatter -from celery.utils.log import LoggingProxy -from celery.utils import uuid -from celery.utils.log import ( - get_logger, - ColorFormatter, - logger as base_logger, - get_task_logger, - task_logger, - in_sighandler, - logger_isa, - ensure_process_aware_logger, -) -from celery.tests.case import ( - AppCase, Mock, SkipTest, - get_handlers, override_stdouts, patch, wrap_logger, restore_logging, -) - - -class test_TaskFormatter(AppCase): - - def test_no_task(self): - class Record(object): - msg = 'hello world' - levelname = 'info' - exc_text = exc_info = None - stack_info = None - - def getMessage(self): - return self.msg - record = Record() - x = TaskFormatter() - x.format(record) - self.assertEqual(record.task_name, '???') - self.assertEqual(record.task_id, '???') - - -class test_logger_isa(AppCase): - - 
def test_isa(self): - x = get_task_logger('Z1george') - self.assertTrue(logger_isa(x, task_logger)) - prev_x, x.parent = x.parent, None - try: - self.assertFalse(logger_isa(x, task_logger)) - finally: - x.parent = prev_x - - y = get_task_logger('Z1elaine') - y.parent = x - self.assertTrue(logger_isa(y, task_logger)) - self.assertTrue(logger_isa(y, x)) - self.assertTrue(logger_isa(y, y)) - - z = get_task_logger('Z1jerry') - z.parent = y - self.assertTrue(logger_isa(z, task_logger)) - self.assertTrue(logger_isa(z, y)) - self.assertTrue(logger_isa(z, x)) - self.assertTrue(logger_isa(z, z)) - - def test_recursive(self): - x = get_task_logger('X1foo') - prev, x.parent = x.parent, x - try: - with self.assertRaises(RuntimeError): - logger_isa(x, task_logger) - finally: - x.parent = prev - - y = get_task_logger('X2foo') - z = get_task_logger('X2foo') - prev_y, y.parent = y.parent, z - try: - prev_z, z.parent = z.parent, y - try: - with self.assertRaises(RuntimeError): - logger_isa(y, task_logger) - finally: - z.parent = prev_z - finally: - y.parent = prev_y - - -class test_ColorFormatter(AppCase): - - @patch('celery.utils.log.safe_str') - @patch('logging.Formatter.formatException') - def test_formatException_not_string(self, fe, safe_str): - x = ColorFormatter() - value = KeyError() - fe.return_value = value - self.assertIs(x.formatException(value), value) - self.assertTrue(fe.called) - self.assertFalse(safe_str.called) - - @patch('logging.Formatter.formatException') - @patch('celery.utils.log.safe_str') - def test_formatException_string(self, safe_str, fe): - x = ColorFormatter() - fe.return_value = 'HELLO' - try: - raise Exception() - except Exception: - self.assertTrue(x.formatException(sys.exc_info())) - if sys.version_info[0] == 2: - self.assertTrue(safe_str.called) - - @patch('logging.Formatter.format') - def test_format_object(self, _format): - x = ColorFormatter() - x.use_color = True - record = Mock() - record.levelname = 'ERROR' - record.msg = object() - self.assertTrue(x.format(record)) - - @patch('celery.utils.log.safe_str') - def test_format_raises(self, safe_str): - x = ColorFormatter() - - def on_safe_str(s): - try: - raise ValueError('foo') - finally: - safe_str.side_effect = None - safe_str.side_effect = on_safe_str - - class Record(object): - levelname = 'ERROR' - msg = 'HELLO' - exc_info = 1 - exc_text = 'error text' - stack_info = None - - def __str__(self): - return on_safe_str('') - - def getMessage(self): - return self.msg - - record = Record() - safe_str.return_value = record - - msg = x.format(record) - self.assertIn('= 3: - raise - else: - break - - def assertRelativedelta(self, due, last_ran): - try: - from dateutil.relativedelta import relativedelta - except ImportError: - return - l1, d1, n1 = due.remaining_delta(last_ran) - l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) - if not isinstance(d1, relativedelta): - self.assertEqual(l1, l2) - for field, value in items(d1._fields()): - self.assertEqual(getattr(d1, field), value) - self.assertFalse(d2.years) - self.assertFalse(d2.months) - self.assertFalse(d2.days) - self.assertFalse(d2.leapdays) - self.assertFalse(d2.hours) - self.assertFalse(d2.minutes) - self.assertFalse(d2.seconds) - self.assertFalse(d2.microseconds) - - def test_every_minute_execution_is_due(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertRelativedelta(self.every_minute, last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - 
def test_every_minute_execution_is_not_due(self): - last_ran = self.now - timedelta(seconds=self.now.second) - due, remaining = self.every_minute.is_due(last_ran) - self.assertFalse(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_saturday(self): - # 29th of May 2010 is a saturday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_sunday(self): - # 30th of May 2010 is a sunday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_execution_is_due_on_monday(self): - # 31st of May 2010 is a monday - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): - last_ran = self.now - timedelta(seconds=61) - due, remaining = self.every_minute.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_every_hour_execution_is_due(self): - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 60 * 60) - - def test_every_hour_execution_is_not_due(self): - with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): - due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_first_quarter_execution_is_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 15)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 6, 30), - ) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - def test_second_quarter_execution_is_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 6, 30), - ) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - def test_first_quarter_execution_is_not_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 14)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 10, 0), - ) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_second_quarter_execution_is_not_due(self): - with patch_crontab_nowfun( - self.quarterly, datetime(2010, 5, 10, 10, 29)): - due, remaining = self.quarterly.is_due( - datetime(2010, 5, 10, 10, 15), - ) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - def test_daily_execution_is_due(self): - with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): - due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 24 * 60 * 60) - - def test_daily_execution_is_not_due(self): - with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): - due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 21 * 60 * 60) - - def test_weekly_execution_is_due(self): - with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): - due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) - self.assertTrue(due) - 
self.assertEqual(remaining, 7 * 24 * 60 * 60) - - def test_weekly_execution_is_not_due(self): - with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): - due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60) - - def test_monthly_execution_is_due(self): - with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): - due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 28 * 24 * 60 * 60) - - def test_monthly_execution_is_not_due(self): - with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): - due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) - - def test_monthly_moy_execution_is_due(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 22, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 7, 4, 10, 0), - ) - self.assertTrue(due) - self.assertEqual(remaining, 60.) - - def test_monthly_moy_execution_is_not_due(self): - raise SkipTest('unstable test') - with patch_crontab_nowfun( - self.monthly_moy, datetime(2013, 6, 28, 14, 30)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 6, 28, 22, 14), - ) - self.assertFalse(due) - attempt = ( - time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - - time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - - 60 * 60 - ) - self.assertEqual(remaining, attempt) - - def test_monthly_moy_execution_is_due2(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 22, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 2, 28, 10, 0), - ) - self.assertTrue(due) - self.assertEqual(remaining, 60.) 
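The monthly_moy cases above combine day_of_month with month_of_year. The deleted fixture's exact arguments are not visible in this hunk, so the values below are guesses for illustration only; the point is that every crontab field accepts an int or a crontab-style pattern ('*', '*/3', comma lists):

    from celery.schedules import crontab

    # Presumed shape of the fixture: fire at 22:00 on the 26th,
    # but only in February (month_of_year='2').
    monthly_moy = crontab(minute=0, hour=22,
                          day_of_month='26', month_of_year='2')

When such an entry is not due, the second element of is_due()'s return value is how long beat should wait before re-checking, which is what the hand-computed `attempt` value in test_monthly_moy_execution_is_not_due approximates via time.mktime().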
- - def test_monthly_moy_execution_is_not_due2(self): - with patch_crontab_nowfun( - self.monthly_moy, datetime(2014, 2, 26, 21, 0)): - due, remaining = self.monthly_moy.is_due( - datetime(2013, 6, 28, 22, 14), - ) - self.assertFalse(due) - attempt = 60 * 60 - self.assertEqual(remaining, attempt) - - def test_yearly_execution_is_due(self): - with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): - due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 364 * 24 * 60 * 60) - - def test_yearly_execution_is_not_due(self): - with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): - due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py deleted file mode 100644 index b0ff108..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import absolute_import - -from collections import Mapping, MutableMapping - -from celery.app.utils import Settings, filter_hidden_settings, bugreport - -from celery.tests.case import AppCase, Mock - - -class TestSettings(AppCase): - """ - Tests of celery.app.utils.Settings - """ - def test_is_mapping(self): - """Settings should be a collections.Mapping""" - self.assertTrue(issubclass(Settings, Mapping)) - - def test_is_mutable_mapping(self): - """Settings should be a collections.MutableMapping""" - self.assertTrue(issubclass(Settings, MutableMapping)) - - -class test_filter_hidden_settings(AppCase): - - def test_handles_non_string_keys(self): - """filter_hidden_settings shouldn't raise an exception when handling - mappings with non-string keys""" - conf = { - 'STRING_KEY': 'VALUE1', - ('NON', 'STRING', 'KEY'): 'VALUE2', - 'STRING_KEY2': { - 'STRING_KEY3': 1, - ('NON', 'STRING', 'KEY', '2'): 2 - }, - } - filter_hidden_settings(conf) - - -class test_bugreport(AppCase): - - def test_no_conn_driver_info(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock() - conn.transport = None - - bugreport(self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py deleted file mode 100644 index 282f8b1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py +++ /dev/null @@ -1,406 +0,0 @@ -from __future__ import absolute_import - -import json -import pickle -import socket - -from contextlib import contextmanager -from datetime import timedelta -from pickle import dumps, loads - -from billiard.einfo import ExceptionInfo - -from celery import states -from celery.backends.amqp import AMQPBackend -from celery.exceptions import TimeoutError -from celery.five import Empty, Queue, range -from celery.utils import uuid - -from celery.tests.case import ( - AppCase, Mock, depends_on_current_app, patch, sleepdeprived, -) - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class test_AMQPBackend(AppCase): - - def create_backend(self, **opts): - opts = dict(dict(serializer='pickle', persistent=True), **opts) - return AMQPBackend(self.app, **opts) - - def test_mark_as_done(self): - tb1 = self.create_backend(max_cached_results=1) - tb2 = self.create_backend(max_cached_results=1) - - 
tid = uuid() - - tb1.mark_as_done(tid, 42) - self.assertEqual(tb2.get_status(tid), states.SUCCESS) - self.assertEqual(tb2.get_result(tid), 42) - self.assertTrue(tb2._cache.get(tid)) - self.assertTrue(tb2.get_result(tid), 42) - - @depends_on_current_app - def test_pickleable(self): - self.assertTrue(loads(dumps(self.create_backend()))) - - def test_revive(self): - tb = self.create_backend() - tb.revive(None) - - def test_is_pickled(self): - tb1 = self.create_backend() - tb2 = self.create_backend() - - tid2 = uuid() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - tb1.mark_as_done(tid2, result) - # is serialized properly. - rindb = tb2.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_failure(self): - tb1 = self.create_backend() - tb2 = self.create_backend() - - tid3 = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - einfo = ExceptionInfo() - tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback) - self.assertEqual(tb2.get_status(tid3), states.FAILURE) - self.assertIsInstance(tb2.get_result(tid3), KeyError) - self.assertEqual(tb2.get_traceback(tid3), einfo.traceback) - - def test_repair_uuid(self): - from celery.backends.amqp import repair_uuid - for i in range(10): - tid = uuid() - self.assertEqual(repair_uuid(tid.replace('-', '')), tid) - - def test_expires_is_int(self): - b = self.create_backend(expires=48) - self.assertEqual(b.queue_arguments.get('x-expires'), 48 * 1000.0) - - def test_expires_is_float(self): - b = self.create_backend(expires=48.3) - self.assertEqual(b.queue_arguments.get('x-expires'), 48.3 * 1000.0) - - def test_expires_is_timedelta(self): - b = self.create_backend(expires=timedelta(minutes=1)) - self.assertEqual(b.queue_arguments.get('x-expires'), 60 * 1000.0) - - @sleepdeprived() - def test_store_result_retries(self): - iterations = [0] - stop_raising_at = [5] - - def publish(*args, **kwargs): - if iterations[0] > stop_raising_at[0]: - return - iterations[0] += 1 - raise KeyError('foo') - - backend = AMQPBackend(self.app) - from celery.app.amqp import TaskProducer - prod, TaskProducer.publish = TaskProducer.publish, publish - try: - with self.assertRaises(KeyError): - backend.retry_policy['max_retries'] = None - backend.store_result('foo', 'bar', 'STARTED') - - with self.assertRaises(KeyError): - backend.retry_policy['max_retries'] = 10 - backend.store_result('foo', 'bar', 'STARTED') - finally: - TaskProducer.publish = prod - - def assertState(self, retval, state): - self.assertEqual(retval['status'], state) - - def test_poll_no_messages(self): - b = self.create_backend() - self.assertState(b.get_task_meta(uuid()), states.PENDING) - - @contextmanager - def _result_context(self, serializer='pickle'): - results = Queue() - - class Message(object): - acked = 0 - requeued = 0 - - def __init__(self, **merge): - self.payload = dict({'status': states.STARTED, - 'result': None}, **merge) - if serializer == 'json': - self.body = json.dumps(self.payload) - self.content_type = 'application/json' - else: - self.body = pickle.dumps(self.payload) - self.content_type = 'application/x-python-serialize' - self.content_encoding = 'binary' - - def ack(self, *args, **kwargs): - self.acked += 1 - - def requeue(self, *args, **kwargs): - self.requeued += 1 - - class MockBinding(object): - - def __init__(self, *args, **kwargs): - self.channel = Mock() - - def __call__(self, *args, **kwargs): - return self - - def declare(self): - pass - - def get(self, no_ack=False, 
accept=None): - try: - m = results.get(block=False) - if m: - m.accept = accept - return m - except Empty: - pass - - def is_bound(self): - return True - - class MockBackend(AMQPBackend): - Queue = MockBinding - - backend = MockBackend(self.app, max_cached_results=100) - backend.serializer = serializer - backend._republish = Mock() - - yield results, backend, Message - - def test_backlog_limit_exceeded(self): - with self._result_context() as (results, backend, Message): - for i in range(1001): - results.put(Message(task_id='id', status=states.RECEIVED)) - with self.assertRaises(backend.BacklogLimitExceeded): - backend.get_task_meta('id') - - def test_poll_result(self): - with self._result_context() as (results, backend, Message): - tid = uuid() - # FFWD's to the latest state. - state_messages = [ - Message(task_id=tid, status=states.RECEIVED, seq=1), - Message(task_id=tid, status=states.STARTED, seq=2), - Message(task_id=tid, status=states.FAILURE, seq=3), - ] - for state_message in state_messages: - results.put(state_message) - r1 = backend.get_task_meta(tid) - self.assertDictContainsSubset( - { - 'status': states.FAILURE, - 'seq': 3 - }, r1, 'FFWDs to the last state', - ) - - # Caches last known state. - tid = uuid() - results.put(Message(task_id=tid)) - backend.get_task_meta(tid) - self.assertIn(tid, backend._cache, 'Caches last known state') - - self.assertTrue(state_messages[-1].requeued) - - # Returns cache if no new states. - results.queue.clear() - assert not results.qsize() - backend._cache[tid] = 'hello' - self.assertEqual( - backend.get_task_meta(tid), 'hello', - 'Returns cache if no new states', - ) - - def test_poll_result_for_json_serializer(self): - with self._result_context(serializer='json') as ( - results, backend, Message): - tid = uuid() - # FFWD's to the latest state. - state_messages = [ - Message(task_id=tid, status=states.RECEIVED, seq=1), - Message(task_id=tid, status=states.STARTED, seq=2), - Message(task_id=tid, status=states.FAILURE, seq=3, - result={ - 'exc_type': 'RuntimeError', - 'exc_message': 'Mock' - }), - ] - for state_message in state_messages: - results.put(state_message) - r1 = backend.get_task_meta(tid) - self.assertDictContainsSubset({ - 'status': states.FAILURE, - 'seq': 3 - }, r1, 'FFWDs to the last state') - self.assertEquals(type(r1['result']).__name__, 'RuntimeError') - self.assertEqual(str(r1['result']), 'Mock') - - # Caches last known state. - tid = uuid() - results.put(Message(task_id=tid)) - backend.get_task_meta(tid) - self.assertIn(tid, backend._cache, 'Caches last known state') - - self.assertTrue(state_messages[-1].requeued) - - # Returns cache if no new states. 
- results.queue.clear() - assert not results.qsize() - backend._cache[tid] = 'hello' - self.assertEqual( - backend.get_task_meta(tid), 'hello', - 'Returns cache if no new states', - ) - - def test_wait_for(self): - b = self.create_backend() - - tid = uuid() - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, None, states.STARTED) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, None, states.RETRY) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.1) - b.store_result(tid, 42, states.SUCCESS) - self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42) - b.store_result(tid, 56, states.SUCCESS) - self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42, - 'result is cached') - self.assertEqual(b.wait_for(tid, timeout=1, cache=False)['result'], 56) - b.store_result(tid, KeyError('foo'), states.FAILURE) - res = b.wait_for(tid, timeout=1, cache=False) - self.assertEqual(res['status'], states.FAILURE) - b.store_result(tid, KeyError('foo'), states.PENDING) - with self.assertRaises(TimeoutError): - b.wait_for(tid, timeout=0.01, cache=False) - - def test_drain_events_remaining_timeouts(self): - - class Connection(object): - - def drain_events(self, timeout=None): - pass - - b = self.create_backend() - with self.app.pool.acquire_channel(block=False) as (_, channel): - binding = b._create_binding(uuid()) - consumer = b.Consumer(channel, binding, no_ack=True) - with self.assertRaises(socket.timeout): - b.drain_events(Connection(), consumer, timeout=0.1) - - def test_get_many(self): - b = self.create_backend(max_cached_results=10) - - tids = [] - for i in range(10): - tid = uuid() - b.store_result(tid, i, states.SUCCESS) - tids.append(tid) - - res = list(b.get_many(tids, timeout=1)) - expected_results = [ - (task_id, { - 'status': states.SUCCESS, - 'result': i, - 'traceback': None, - 'task_id': task_id, - 'children': None, - }) - for i, task_id in enumerate(tids) - ] - self.assertEqual(sorted(res), sorted(expected_results)) - self.assertDictEqual(b._cache[res[0][0]], res[0][1]) - cached_res = list(b.get_many(tids, timeout=1)) - self.assertEqual(sorted(cached_res), sorted(expected_results)) - - # times out when not ready in cache (this shouldn't happen) - b._cache[res[0][0]]['status'] = states.RETRY - with self.assertRaises(socket.timeout): - list(b.get_many(tids, timeout=0.01)) - - # times out when result not yet ready - with self.assertRaises(socket.timeout): - tids = [uuid()] - b.store_result(tids[0], i, states.PENDING) - list(b.get_many(tids, timeout=0.01)) - - def test_get_many_raises_outer_block(self): - - class Backend(AMQPBackend): - - def Consumer(*args, **kwargs): - raise KeyError('foo') - - b = Backend(self.app) - with self.assertRaises(KeyError): - next(b.get_many(['id1'])) - - def test_get_many_raises_inner_block(self): - with patch('kombu.connection.Connection.drain_events') as drain: - drain.side_effect = KeyError('foo') - b = AMQPBackend(self.app) - with self.assertRaises(KeyError): - next(b.get_many(['id1'])) - - def test_consume_raises_inner_block(self): - with patch('kombu.connection.Connection.drain_events') as drain: - - def se(*args, **kwargs): - drain.side_effect = ValueError() - raise KeyError('foo') - drain.side_effect = se - b = AMQPBackend(self.app) - with self.assertRaises(ValueError): - next(b.consume('id1')) - - def test_no_expires(self): - b = self.create_backend(expires=None) - app = self.app - app.conf.CELERY_TASK_RESULT_EXPIRES = None - b = 
self.create_backend(expires=None) - with self.assertRaises(KeyError): - b.queue_arguments['x-expires'] - - def test_process_cleanup(self): - self.create_backend().process_cleanup() - - def test_reload_task_result(self): - with self.assertRaises(NotImplementedError): - self.create_backend().reload_task_result('x') - - def test_reload_group_result(self): - with self.assertRaises(NotImplementedError): - self.create_backend().reload_group_result('x') - - def test_save_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().save_group('x', 'x') - - def test_restore_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().restore_group('x') - - def test_delete_group(self): - with self.assertRaises(NotImplementedError): - self.create_backend().delete_group('x') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py deleted file mode 100644 index d301e55..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import absolute_import - -from celery import backends -from celery.exceptions import ImproperlyConfigured -from celery.backends.amqp import AMQPBackend -from celery.backends.cache import CacheBackend -from celery.tests.case import AppCase, depends_on_current_app, patch - - -class test_backends(AppCase): - - def test_get_backend_aliases(self): - expects = [('amqp://', AMQPBackend), - ('cache+memory://', CacheBackend)] - - for url, expect_cls in expects: - backend, url = backends.get_backend_by_url(url, self.app.loader) - self.assertIsInstance( - backend(app=self.app, url=url), - expect_cls, - ) - - def test_unknown_backend(self): - with self.assertRaises(ImportError): - backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader) - - @depends_on_current_app - def test_default_backend(self): - self.assertEqual(backends.default_backend, self.app.backend) - - def test_backend_by_url(self, url='redis://localhost/1'): - from celery.backends.redis import RedisBackend - backend, url_ = backends.get_backend_by_url(url, self.app.loader) - self.assertIs(backend, RedisBackend) - self.assertEqual(url_, url) - - def test_sym_raises_ValuError(self): - with patch('celery.backends.symbol_by_name') as sbn: - sbn.side_effect = ValueError() - with self.assertRaises(ImproperlyConfigured): - backends.get_backend_cls('xxx.xxx:foo', self.app.loader) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py deleted file mode 100644 index f54dc07..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py +++ /dev/null @@ -1,466 +0,0 @@ -from __future__ import absolute_import - -import sys -import types - -from contextlib import contextmanager - -from celery.exceptions import ChordError -from celery.five import items, range -from celery.utils import serialization -from celery.utils.serialization import subclass_exception -from celery.utils.serialization import find_pickleable_exception as fnpe -from celery.utils.serialization import UnpickleableExceptionWrapper -from celery.utils.serialization import get_pickleable_exception as gpe - -from celery import states -from celery import group -from celery.backends.base import ( - BaseBackend, - KeyValueStoreBackend, - DisabledBackend, -) -from celery.result import result_from_tuple -from 
celery.utils import uuid - -from celery.tests.case import AppCase, Mock, SkipTest, patch - - -class wrapobject(object): - - def __init__(self, *args, **kwargs): - self.args = args - -if sys.version_info[0] == 3 or getattr(sys, 'pypy_version_info', None): - Oldstyle = None -else: - Oldstyle = types.ClassType('Oldstyle', (), {}) -Unpickleable = subclass_exception('Unpickleable', KeyError, 'foo.module') -Impossible = subclass_exception('Impossible', object, 'foo.module') -Lookalike = subclass_exception('Lookalike', wrapobject, 'foo.module') - - -class test_serialization(AppCase): - - def test_create_exception_cls(self): - self.assertTrue(serialization.create_exception_cls('FooError', 'm')) - self.assertTrue(serialization.create_exception_cls('FooError', 'm', - KeyError)) - - -class test_BaseBackend_interface(AppCase): - - def setup(self): - self.b = BaseBackend(self.app) - - def test__forget(self): - with self.assertRaises(NotImplementedError): - self.b._forget('SOMExx-N0Nex1stant-IDxx-') - - def test_forget(self): - with self.assertRaises(NotImplementedError): - self.b.forget('SOMExx-N0nex1stant-IDxx-') - - def test_on_chord_part_return(self): - self.b.on_chord_part_return(None, None, None) - - def test_apply_chord(self, unlock='celery.chord_unlock'): - self.app.tasks[unlock] = Mock() - self.b.apply_chord( - group(app=self.app), (), 'dakj221', None, - result=[self.app.AsyncResult(x) for x in [1, 2, 3]], - ) - self.assertTrue(self.app.tasks[unlock].apply_async.call_count) - - -class test_exception_pickle(AppCase): - - def test_oldstyle(self): - if Oldstyle is None: - raise SkipTest('py3k does not support old style classes') - self.assertTrue(fnpe(Oldstyle())) - - def test_BaseException(self): - self.assertIsNone(fnpe(Exception())) - - def test_get_pickleable_exception(self): - exc = Exception('foo') - self.assertEqual(gpe(exc), exc) - - def test_unpickleable(self): - self.assertIsInstance(fnpe(Unpickleable()), KeyError) - self.assertIsNone(fnpe(Impossible())) - - -class test_prepare_exception(AppCase): - - def setup(self): - self.b = BaseBackend(self.app) - - def test_unpickleable(self): - x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) - self.assertIsInstance(x, KeyError) - y = self.b.exception_to_python(x) - self.assertIsInstance(y, KeyError) - - def test_impossible(self): - x = self.b.prepare_exception(Impossible()) - self.assertIsInstance(x, UnpickleableExceptionWrapper) - self.assertTrue(str(x)) - y = self.b.exception_to_python(x) - self.assertEqual(y.__class__.__name__, 'Impossible') - if sys.version_info < (2, 5): - self.assertTrue(y.__class__.__module__) - else: - self.assertEqual(y.__class__.__module__, 'foo.module') - - def test_regular(self): - x = self.b.prepare_exception(KeyError('baz')) - self.assertIsInstance(x, KeyError) - y = self.b.exception_to_python(x) - self.assertIsInstance(y, KeyError) - - -class KVBackend(KeyValueStoreBackend): - mget_returns_dict = False - - def __init__(self, app, *args, **kwargs): - self.db = {} - super(KVBackend, self).__init__(app) - - def get(self, key): - return self.db.get(key) - - def set(self, key, value): - self.db[key] = value - - def mget(self, keys): - if self.mget_returns_dict: - return dict((key, self.get(key)) for key in keys) - else: - return [self.get(k) for k in keys] - - def delete(self, key): - self.db.pop(key, None) - - -class DictBackend(BaseBackend): - - def __init__(self, *args, **kwargs): - BaseBackend.__init__(self, *args, **kwargs) - self._data = {'can-delete': {'result': 'foo'}} - - def _restore_group(self, 
group_id): - if group_id == 'exists': - return {'result': 'group'} - - def _get_task_meta_for(self, task_id): - if task_id == 'task-exists': - return {'result': 'task'} - - def _delete_group(self, group_id): - self._data.pop(group_id, None) - - -class test_BaseBackend_dict(AppCase): - - def setup(self): - self.b = DictBackend(app=self.app) - - def test_delete_group(self): - self.b.delete_group('can-delete') - self.assertNotIn('can-delete', self.b._data) - - def test_prepare_exception_json(self): - x = DictBackend(self.app, serializer='json') - e = x.prepare_exception(KeyError('foo')) - self.assertIn('exc_type', e) - e = x.exception_to_python(e) - self.assertEqual(e.__class__.__name__, 'KeyError') - self.assertEqual(str(e), "'foo'") - - def test_save_group(self): - b = BaseBackend(self.app) - b._save_group = Mock() - b.save_group('foofoo', 'xxx') - b._save_group.assert_called_with('foofoo', 'xxx') - - def test_forget_interface(self): - b = BaseBackend(self.app) - with self.assertRaises(NotImplementedError): - b.forget('foo') - - def test_restore_group(self): - self.assertIsNone(self.b.restore_group('missing')) - self.assertIsNone(self.b.restore_group('missing')) - self.assertEqual(self.b.restore_group('exists'), 'group') - self.assertEqual(self.b.restore_group('exists'), 'group') - self.assertEqual(self.b.restore_group('exists', cache=False), 'group') - - def test_reload_group_result(self): - self.b._cache = {} - self.b.reload_group_result('exists') - self.b._cache['exists'] = {'result': 'group'} - - def test_reload_task_result(self): - self.b._cache = {} - self.b.reload_task_result('task-exists') - self.b._cache['task-exists'] = {'result': 'task'} - - def test_fail_from_current_stack(self): - self.b.mark_as_failure = Mock() - try: - raise KeyError('foo') - except KeyError as exc: - self.b.fail_from_current_stack('task_id') - self.assertTrue(self.b.mark_as_failure.called) - args = self.b.mark_as_failure.call_args[0] - self.assertEqual(args[0], 'task_id') - self.assertIs(args[1], exc) - self.assertTrue(args[2]) - - def test_prepare_value_serializes_group_result(self): - self.b.serializer = 'json' - g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) - v = self.b.prepare_value(g) - self.assertIsInstance(v, (list, tuple)) - self.assertEqual(result_from_tuple(v, app=self.app), g) - - v2 = self.b.prepare_value(g[0]) - self.assertIsInstance(v2, (list, tuple)) - self.assertEqual(result_from_tuple(v2, app=self.app), g[0]) - - self.b.serializer = 'pickle' - self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult) - - def test_is_cached(self): - b = BaseBackend(app=self.app, max_cached_results=1) - b._cache['foo'] = 1 - self.assertTrue(b.is_cached('foo')) - self.assertFalse(b.is_cached('false')) - - -class test_KeyValueStoreBackend(AppCase): - - def setup(self): - self.b = KVBackend(app=self.app) - - def test_on_chord_part_return(self): - assert not self.b.implements_incr - self.b.on_chord_part_return(None, None, None) - - def test_get_store_delete_result(self): - tid = uuid() - self.b.mark_as_done(tid, 'Hello world') - self.assertEqual(self.b.get_result(tid), 'Hello world') - self.assertEqual(self.b.get_status(tid), states.SUCCESS) - self.b.forget(tid) - self.assertEqual(self.b.get_status(tid), states.PENDING) - - def test_strip_prefix(self): - x = self.b.get_key_for_task('x1b34') - self.assertEqual(self.b._strip_prefix(x), 'x1b34') - self.assertEqual(self.b._strip_prefix('x1b34'), 'x1b34') - - def test_get_many(self): - for is_dict in True, False: - 
self.b.mget_returns_dict = is_dict - ids = dict((uuid(), i) for i in range(10)) - for id, i in items(ids): - self.b.mark_as_done(id, i) - it = self.b.get_many(list(ids)) - for i, (got_id, got_state) in enumerate(it): - self.assertEqual(got_state['result'], ids[got_id]) - self.assertEqual(i, 9) - self.assertTrue(list(self.b.get_many(list(ids)))) - - def test_get_many_times_out(self): - tasks = [uuid() for _ in range(4)] - self.b._cache[tasks[1]] = {'status': 'PENDING'} - with self.assertRaises(self.b.TimeoutError): - list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) - - def test_chord_part_return_no_gid(self): - self.b.implements_incr = True - task = Mock() - state = 'SUCCESS' - result = 10 - task.request.group = None - self.b.get_key_for_chord = Mock() - self.b.get_key_for_chord.side_effect = AssertionError( - 'should not get here', - ) - self.assertIsNone(self.b.on_chord_part_return(task, state, result)) - - @contextmanager - def _chord_part_context(self, b): - - @self.app.task(shared=False) - def callback(result): - pass - - b.implements_incr = True - b.client = Mock() - with patch('celery.backends.base.GroupResult') as GR: - deps = GR.restore.return_value = Mock(name='DEPS') - deps.__len__ = Mock() - deps.__len__.return_value = 10 - b.incr = Mock() - b.incr.return_value = 10 - b.expire = Mock() - task = Mock() - task.request.group = 'grid' - cb = task.request.chord = callback.s() - task.request.chord.freeze() - callback.backend = b - callback.backend.fail_from_current_stack = Mock() - yield task, deps, cb - - def test_chord_part_return_propagate_set(self): - with self._chord_part_context(self.b) as (task, deps, _): - self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True) - self.assertFalse(self.b.expire.called) - deps.delete.assert_called_with() - deps.join_native.assert_called_with(propagate=True, timeout=3.0) - - def test_chord_part_return_propagate_default(self): - with self._chord_part_context(self.b) as (task, deps, _): - self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None) - self.assertFalse(self.b.expire.called) - deps.delete.assert_called_with() - deps.join_native.assert_called_with( - propagate=self.b.app.conf.CELERY_CHORD_PROPAGATES, - timeout=3.0, - ) - - def test_chord_part_return_join_raises_internal(self): - with self._chord_part_context(self.b) as (task, deps, callback): - deps._failed_join_report = lambda: iter([]) - deps.join_native.side_effect = KeyError('foo') - self.b.on_chord_part_return(task, 'SUCCESS', 10) - self.assertTrue(self.b.fail_from_current_stack.called) - args = self.b.fail_from_current_stack.call_args - exc = args[1]['exc'] - self.assertIsInstance(exc, ChordError) - self.assertIn('foo', str(exc)) - - def test_chord_part_return_join_raises_task(self): - b = KVBackend(serializer='pickle', app=self.app) - with self._chord_part_context(b) as (task, deps, callback): - deps._failed_join_report = lambda: iter([ - self.app.AsyncResult('culprit'), - ]) - deps.join_native.side_effect = KeyError('foo') - b.on_chord_part_return(task, 'SUCCESS', 10) - self.assertTrue(b.fail_from_current_stack.called) - args = b.fail_from_current_stack.call_args - exc = args[1]['exc'] - self.assertIsInstance(exc, ChordError) - self.assertIn('Dependency culprit raised', str(exc)) - - def test_restore_group_from_json(self): - b = KVBackend(serializer='json', app=self.app) - g = self.app.GroupResult( - 'group_id', - [self.app.AsyncResult('a'), self.app.AsyncResult('b')], - ) - b._save_group(g.id, g) - g2 = b._restore_group(g.id)['result'] - 
self.assertEqual(g2, g) - - def test_restore_group_from_pickle(self): - b = KVBackend(serializer='pickle', app=self.app) - g = self.app.GroupResult( - 'group_id', - [self.app.AsyncResult('a'), self.app.AsyncResult('b')], - ) - b._save_group(g.id, g) - g2 = b._restore_group(g.id)['result'] - self.assertEqual(g2, g) - - def test_chord_apply_fallback(self): - self.b.implements_incr = False - self.b.fallback_chord_unlock = Mock() - self.b.apply_chord( - group(app=self.app), (), 'group_id', 'body', - result='result', foo=1, - ) - self.b.fallback_chord_unlock.assert_called_with( - 'group_id', 'body', result='result', foo=1, - ) - - def test_get_missing_meta(self): - self.assertIsNone(self.b.get_result('xxx-missing')) - self.assertEqual(self.b.get_status('xxx-missing'), states.PENDING) - - def test_save_restore_delete_group(self): - tid = uuid() - tsr = self.app.GroupResult( - tid, [self.app.AsyncResult(uuid()) for _ in range(10)], - ) - self.b.save_group(tid, tsr) - self.b.restore_group(tid) - self.assertEqual(self.b.restore_group(tid), tsr) - self.b.delete_group(tid) - self.assertIsNone(self.b.restore_group(tid)) - - def test_restore_missing_group(self): - self.assertIsNone(self.b.restore_group('xxx-nonexistant')) - - -class test_KeyValueStoreBackend_interface(AppCase): - - def test_get(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).get('a') - - def test_set(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).set('a', 1) - - def test_incr(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).incr('a') - - def test_cleanup(self): - self.assertFalse(KeyValueStoreBackend(self.app).cleanup()) - - def test_delete(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).delete('a') - - def test_mget(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).mget(['a']) - - def test_forget(self): - with self.assertRaises(NotImplementedError): - KeyValueStoreBackend(self.app).forget('a') - - -class test_DisabledBackend(AppCase): - - def test_store_result(self): - DisabledBackend(self.app).store_result() - - def test_is_disabled(self): - with self.assertRaises(NotImplementedError): - DisabledBackend(self.app).get_status('foo') - - def test_as_uri(self): - self.assertEqual(DisabledBackend(self.app).as_uri(), 'disabled://') - - -class test_as_uri(AppCase): - - def setup(self): - self.b = BaseBackend( - app=self.app, - url='sch://uuuu:pwpw@hostname.dom' - ) - - def test_as_uri_include_password(self): - self.assertEqual(self.b.as_uri(True), self.b.url) - - def test_as_uri_exclude_password(self): - self.assertEqual(self.b.as_uri(), 'sch://uuuu:**@hostname.dom/') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py deleted file mode 100644 index fcd8dd5..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py +++ /dev/null @@ -1,280 +0,0 @@ -from __future__ import absolute_import - -import sys -import types - -from contextlib import contextmanager - -from kombu.utils.encoding import str_to_bytes, ensure_bytes - -from celery import signature -from celery import states -from celery import group -from celery.backends.cache import CacheBackend, DummyClient, backends -from celery.exceptions import ImproperlyConfigured -from celery.five import items, string, text_t -from celery.utils import uuid - -from 
celery.tests.case import ( - AppCase, Mock, disable_stdouts, mask_modules, patch, reset_modules, -) - -PY3 = sys.version_info[0] == 3 - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class test_CacheBackend(AppCase): - - def setup(self): - self.tb = CacheBackend(backend='memory://', app=self.app) - self.tid = uuid() - self.old_get_best_memcached = backends['memcache'] - backends['memcache'] = lambda: (DummyClient, ensure_bytes) - - def teardown(self): - backends['memcache'] = self.old_get_best_memcached - - def test_no_backend(self): - self.app.conf.CELERY_CACHE_BACKEND = None - with self.assertRaises(ImproperlyConfigured): - CacheBackend(backend=None, app=self.app) - - def test_mark_as_done(self): - self.assertEqual(self.tb.get_status(self.tid), states.PENDING) - self.assertIsNone(self.tb.get_result(self.tid)) - - self.tb.mark_as_done(self.tid, 42) - self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS) - self.assertEqual(self.tb.get_result(self.tid), 42) - - def test_is_pickled(self): - result = {'foo': 'baz', 'bar': SomeClass(12345)} - self.tb.mark_as_done(self.tid, result) - # is serialized properly. - rindb = self.tb.get_result(self.tid) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_failure(self): - try: - raise KeyError('foo') - except KeyError as exception: - self.tb.mark_as_failure(self.tid, exception) - self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) - self.assertIsInstance(self.tb.get_result(self.tid), KeyError) - - def test_apply_chord(self): - tb = CacheBackend(backend='memory://', app=self.app) - gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] - tb.apply_chord(group(app=self.app), (), gid, {}, result=res) - - @patch('celery.result.GroupResult.restore') - def test_on_chord_part_return(self, restore): - tb = CacheBackend(backend='memory://', app=self.app) - - deps = Mock() - deps.__len__ = Mock() - deps.__len__.return_value = 2 - restore.return_value = deps - task = Mock() - task.name = 'foobarbaz' - self.app.tasks['foobarbaz'] = task - task.request.chord = signature(task) - - gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] - task.request.group = gid - tb.apply_chord(group(app=self.app), (), gid, {}, result=res) - - self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task, 'SUCCESS', 10) - self.assertFalse(deps.join_native.called) - - tb.on_chord_part_return(task, 'SUCCESS', 10) - deps.join_native.assert_called_with(propagate=True, timeout=3.0) - deps.delete.assert_called_with() - - def test_mget(self): - self.tb.set('foo', 1) - self.tb.set('bar', 2) - - self.assertDictEqual(self.tb.mget(['foo', 'bar']), - {'foo': 1, 'bar': 2}) - - def test_forget(self): - self.tb.mark_as_done(self.tid, {'foo': 'bar'}) - x = self.app.AsyncResult(self.tid, backend=self.tb) - x.forget() - self.assertIsNone(x.result) - - def test_process_cleanup(self): - self.tb.process_cleanup() - - def test_expires_as_int(self): - tb = CacheBackend(backend='memory://', expires=10, app=self.app) - self.assertEqual(tb.expires, 10) - - def test_unknown_backend_raises_ImproperlyConfigured(self): - with self.assertRaises(ImproperlyConfigured): - CacheBackend(backend='unknown://', app=self.app) - - def test_as_uri_no_servers(self): - self.assertEqual(self.tb.as_uri(), 'memory:///') - - def test_as_uri_one_server(self): - backend = 'memcache://127.0.0.1:11211/' - b = CacheBackend(backend=backend, app=self.app) - self.assertEqual(b.as_uri(), 
backend) - - def test_as_uri_multiple_servers(self): - backend = 'memcache://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' - b = CacheBackend(backend=backend, app=self.app) - self.assertEqual(b.as_uri(), backend) - - @disable_stdouts - def test_regression_worker_startup_info(self): - self.app.conf.result_backend = ( - 'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' - ) - worker = self.app.Worker() - worker.on_start() - self.assertTrue(worker.startup_info()) - - -class MyMemcachedStringEncodingError(Exception): - pass - - -class MemcachedClient(DummyClient): - - def set(self, key, value, *args, **kwargs): - if PY3: - key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode' - else: - key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode' - if isinstance(key, key_t): - raise MyMemcachedStringEncodingError( - 'Keys must be {0}, not {1}. Convert your ' - 'strings using mystring.{2}(charset)!'.format( - must_be, not_be, cod)) - return super(MemcachedClient, self).set(key, value, *args, **kwargs) - - -class MockCacheMixin(object): - - @contextmanager - def mock_memcache(self): - memcache = types.ModuleType('memcache') - memcache.Client = MemcachedClient - memcache.Client.__module__ = memcache.__name__ - prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache - try: - yield True - finally: - if prev is not None: - sys.modules['memcache'] = prev - - @contextmanager - def mock_pylibmc(self): - pylibmc = types.ModuleType('pylibmc') - pylibmc.Client = MemcachedClient - pylibmc.Client.__module__ = pylibmc.__name__ - prev = sys.modules.get('pylibmc') - sys.modules['pylibmc'] = pylibmc - try: - yield True - finally: - if prev is not None: - sys.modules['pylibmc'] = prev - - -class test_get_best_memcache(AppCase, MockCacheMixin): - - def test_pylibmc(self): - with self.mock_pylibmc(): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - self.assertEqual(cache.get_best_memcache()[0].__module__, - 'pylibmc') - - def test_memcache(self): - with self.mock_memcache(): - with reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - self.assertEqual(cache.get_best_memcache()[0]().__module__, - 'memcache') - - def test_no_implementations(self): - with mask_modules('pylibmc', 'memcache'): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - with self.assertRaises(ImproperlyConfigured): - cache.get_best_memcache() - - def test_cached(self): - with self.mock_pylibmc(): - with reset_modules('celery.backends.cache'): - from celery.backends import cache - cache._imp = [None] - cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) - self.assertTrue(cache._imp[0]) - cache.get_best_memcache()[0]() - - def test_backends(self): - from celery.backends.cache import backends - with self.mock_memcache(): - for name, fun in items(backends): - self.assertTrue(fun()) - - -class test_memcache_key(AppCase, MockCacheMixin): - - def test_memcache_unicode_key(self): - with self.mock_memcache(): - with reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - task_id, result = string(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_memcache_bytes_key(self): - with self.mock_memcache(): - with 
reset_modules('celery.backends.cache'): - with mask_modules('pylibmc'): - from celery.backends import cache - cache._imp = [None] - task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_pylibmc_unicode_key(self): - with reset_modules('celery.backends.cache'): - with self.mock_pylibmc(): - from celery.backends import cache - cache._imp = [None] - task_id, result = string(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) - - def test_pylibmc_bytes_key(self): - with reset_modules('celery.backends.cache'): - with self.mock_pylibmc(): - from celery.backends import cache - cache._imp = [None] - task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache', app=self.app) - b.store_result(task_id, result, status=states.SUCCESS) - self.assertEqual(b.get_result(task_id), result) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py deleted file mode 100644 index 1a43be9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py +++ /dev/null @@ -1,190 +0,0 @@ -from __future__ import absolute_import - -import socket - -from pickle import loads, dumps - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.tests.case import ( - AppCase, Mock, mock_module, depends_on_current_app, -) - - -class Object(object): - pass - - -def install_exceptions(mod): - # py3k: cannot catch exceptions not ineheriting from BaseException. 
- - class NotFoundException(Exception): - pass - - class TException(Exception): - pass - - class InvalidRequestException(Exception): - pass - - class UnavailableException(Exception): - pass - - class TimedOutException(Exception): - pass - - class AllServersUnavailable(Exception): - pass - - mod.NotFoundException = NotFoundException - mod.TException = TException - mod.InvalidRequestException = InvalidRequestException - mod.TimedOutException = TimedOutException - mod.UnavailableException = UnavailableException - mod.AllServersUnavailable = AllServersUnavailable - - -class test_CassandraBackend(AppCase): - - def setup(self): - self.app.conf.update( - CASSANDRA_SERVERS=['example.com'], - CASSANDRA_KEYSPACE='keyspace', - CASSANDRA_COLUMN_FAMILY='columns', - ) - - def test_init_no_pycassa(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - prev, mod.pycassa = mod.pycassa, None - try: - with self.assertRaises(ImproperlyConfigured): - mod.CassandraBackend(app=self.app) - finally: - mod.pycassa = prev - - def test_init_with_and_without_LOCAL_QUROM(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - cons = mod.pycassa.ConsistencyLevel = Object() - cons.LOCAL_QUORUM = 'foo' - - self.app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO' - self.app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO' - - mod.CassandraBackend(app=self.app) - cons.LOCAL_FOO = 'bar' - mod.CassandraBackend(app=self.app) - - # no servers raises ImproperlyConfigured - with self.assertRaises(ImproperlyConfigured): - self.app.conf.CASSANDRA_SERVERS = None - mod.CassandraBackend( - app=self.app, keyspace='b', column_family='c', - ) - - @depends_on_current_app - def test_reduce(self): - with mock_module('pycassa'): - from celery.backends.cassandra import CassandraBackend - self.assertTrue(loads(dumps(CassandraBackend(app=self.app)))) - - def test_get_task_meta_for(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - mod.Thrift = Mock() - install_exceptions(mod.Thrift) - x = mod.CassandraBackend(app=self.app) - Get_Column = x._get_column_family = Mock() - get_column = Get_Column.return_value = Mock() - get = get_column.get - META = get.return_value = { - 'task_id': 'task_id', - 'status': states.SUCCESS, - 'result': '1', - 'date_done': 'date', - 'traceback': '', - 'children': None, - } - x.decode = Mock() - x.detailed_mode = False - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.SUCCESS) - - x.detailed_mode = True - row = get.return_value = Mock() - row.values.return_value = [Mock()] - x.decode.return_value = META - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.SUCCESS) - x.decode.return_value = Mock() - - x.detailed_mode = False - get.side_effect = KeyError() - meta = x._get_task_meta_for('task_id') - self.assertEqual(meta['status'], states.PENDING) - - calls = [0] - end = [10] - - def work_eventually(*arg): - try: - if calls[0] > end[0]: - return META - raise socket.error() - finally: - calls[0] += 1 - get.side_effect = work_eventually - x._retry_timeout = 10 - x._retry_wait = 0.01 - meta = x._get_task_meta_for('task') - self.assertEqual(meta['status'], states.SUCCESS) - - x._retry_timeout = 0.1 - calls[0], end[0] = 0, 100 - with self.assertRaises(socket.error): - x._get_task_meta_for('task') - - def test_store_result(self): - with mock_module('pycassa'): - 
from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - mod.Thrift = Mock() - install_exceptions(mod.Thrift) - x = mod.CassandraBackend(app=self.app) - Get_Column = x._get_column_family = Mock() - cf = Get_Column.return_value = Mock() - x.detailed_mode = False - x._store_result('task_id', 'result', states.SUCCESS) - self.assertTrue(cf.insert.called) - - cf.insert.reset() - x.detailed_mode = True - x._store_result('task_id', 'result', states.SUCCESS) - self.assertTrue(cf.insert.called) - - def test_process_cleanup(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - x = mod.CassandraBackend(app=self.app) - x._column_family = None - x.process_cleanup() - - x._column_family = True - x.process_cleanup() - self.assertIsNone(x._column_family) - - def test_get_column_family(self): - with mock_module('pycassa'): - from celery.backends import cassandra as mod - mod.pycassa = Mock() - install_exceptions(mod.pycassa) - x = mod.CassandraBackend(app=self.app) - self.assertTrue(x._get_column_family()) - self.assertIsNotNone(x._column_family) - self.assertIs(x._get_column_family(), x._column_family) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py deleted file mode 100644 index 3dc6aad..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py +++ /dev/null @@ -1,136 +0,0 @@ -from __future__ import absolute_import - -from celery.backends import couchbase as module -from celery.backends.couchbase import CouchBaseBackend -from celery.exceptions import ImproperlyConfigured -from celery import backends -from celery.tests.case import ( - AppCase, MagicMock, Mock, SkipTest, patch, sentinel, -) - -try: - import couchbase -except ImportError: - couchbase = None # noqa - -COUCHBASE_BUCKET = 'celery_bucket' - - -class test_CouchBaseBackend(AppCase): - - def setup(self): - if couchbase is None: - raise SkipTest('couchbase is not installed.') - self.backend = CouchBaseBackend(app=self.app) - - def test_init_no_couchbase(self): - """test init no couchbase raises""" - prev, module.couchbase = module.couchbase, None - try: - with self.assertRaises(ImproperlyConfigured): - CouchBaseBackend(app=self.app) - finally: - module.couchbase = prev - - def test_init_no_settings(self): - """test init no settings""" - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = [] - with self.assertRaises(ImproperlyConfigured): - CouchBaseBackend(app=self.app) - - def test_init_settings_is_None(self): - """Test init settings is None""" - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None - CouchBaseBackend(app=self.app) - - def test_get_connection_connection_exists(self): - with patch('couchbase.connection.Connection') as mock_Connection: - self.backend._connection = sentinel._connection - - connection = self.backend._get_connection() - - self.assertEqual(sentinel._connection, connection) - self.assertFalse(mock_Connection.called) - - def test_get(self): - """test_get - - CouchBaseBackend.get should return and take two params - db conn to couchbase is mocked. 
- TODO Should test on key not exists - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} - x = CouchBaseBackend(app=self.app) - x._connection = Mock() - mocked_get = x._connection.get = Mock() - mocked_get.return_value.value = sentinel.retval - # should return None - self.assertEqual(x.get('1f3fab'), sentinel.retval) - x._connection.get.assert_called_once_with('1f3fab') - - def test_set(self): - """test_set - - CouchBaseBackend.set should return None and take two params - db conn to couchbase is mocked. - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None - x = CouchBaseBackend(app=self.app) - x._connection = MagicMock() - x._connection.set = MagicMock() - # should return None - self.assertIsNone(x.set(sentinel.key, sentinel.value)) - - def test_delete(self): - """test_delete - - CouchBaseBackend.delete should return and take two params - db conn to couchbase is mocked. - TODO Should test on key not exists - - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} - x = CouchBaseBackend(app=self.app) - x._connection = Mock() - mocked_delete = x._connection.delete = Mock() - mocked_delete.return_value = None - # should return None - self.assertIsNone(x.delete('1f3fab')) - x._connection.delete.assert_called_once_with('1f3fab') - - def test_config_params(self): - """test_config_params - - celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set - """ - self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = { - 'bucket': 'mycoolbucket', - 'host': ['here.host.com', 'there.host.com'], - 'username': 'johndoe', - 'password': 'mysecret', - 'port': '1234', - } - x = CouchBaseBackend(app=self.app) - self.assertEqual(x.bucket, 'mycoolbucket') - self.assertEqual(x.host, ['here.host.com', 'there.host.com'],) - self.assertEqual(x.username, 'johndoe',) - self.assertEqual(x.password, 'mysecret') - self.assertEqual(x.port, 1234) - - def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): - from celery.backends.couchbase import CouchBaseBackend - backend, url_ = backends.get_backend_by_url(url, self.app.loader) - self.assertIs(backend, CouchBaseBackend) - self.assertEqual(url_, url) - - def test_backend_params_by_url(self): - url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' - with self.Celery(backend=url) as app: - x = app.backend - self.assertEqual(x.bucket, 'mycoolbucket') - self.assertEqual(x.host, 'myhost') - self.assertEqual(x.username, 'johndoe') - self.assertEqual(x.password, 'mysecret') - self.assertEqual(x.port, 123) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py deleted file mode 100644 index 6b5bf94..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py +++ /dev/null @@ -1,196 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -from datetime import datetime - -from pickle import loads, dumps - -from celery import states -from celery.exceptions import ImproperlyConfigured -from celery.utils import uuid - -from celery.tests.case import ( - AppCase, - SkipTest, - depends_on_current_app, - mask_modules, - skip_if_pypy, - skip_if_jython, -) - -try: - import sqlalchemy # noqa -except ImportError: - DatabaseBackend = Task = TaskSet = retry = None # noqa -else: - from celery.backends.database import DatabaseBackend, retry - from celery.backends.database.models import Task, TaskSet - - -class SomeClass(object): - - def __init__(self, data): - self.data = data - - -class 
test_DatabaseBackend(AppCase): - - @skip_if_pypy - @skip_if_jython - def setup(self): - if DatabaseBackend is None: - raise SkipTest('sqlalchemy not installed') - self.uri = 'sqlite:///test.db' - - def test_retry_helper(self): - from celery.backends.database import DatabaseError - - calls = [0] - - @retry - def raises(): - calls[0] += 1 - raise DatabaseError(1, 2, 3) - - with self.assertRaises(DatabaseError): - raises(max_retries=5) - self.assertEqual(calls[0], 5) - - def test_missing_SQLAlchemy_raises_ImproperlyConfigured(self): - with mask_modules('sqlalchemy'): - from celery.backends.database import _sqlalchemy_installed - with self.assertRaises(ImproperlyConfigured): - _sqlalchemy_installed() - - def test_missing_dburi_raises_ImproperlyConfigured(self): - self.app.conf.CELERY_RESULT_DBURI = None - with self.assertRaises(ImproperlyConfigured): - DatabaseBackend(app=self.app) - - def test_missing_task_id_is_PENDING(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertEqual(tb.get_status('xxx-does-not-exist'), states.PENDING) - - def test_missing_task_meta_is_dict_with_pending(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertDictContainsSubset({ - 'status': states.PENDING, - 'task_id': 'xxx-does-not-exist-at-all', - 'result': None, - 'traceback': None - }, tb.get_task_meta('xxx-does-not-exist-at-all')) - - def test_mark_as_done(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid = uuid() - - self.assertEqual(tb.get_status(tid), states.PENDING) - self.assertIsNone(tb.get_result(tid)) - - tb.mark_as_done(tid, 42) - self.assertEqual(tb.get_status(tid), states.SUCCESS) - self.assertEqual(tb.get_result(tid), 42) - - def test_is_pickled(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid2 = uuid() - result = {'foo': 'baz', 'bar': SomeClass(12345)} - tb.mark_as_done(tid2, result) - # is serialized properly. 
- rindb = tb.get_result(tid2) - self.assertEqual(rindb.get('foo'), 'baz') - self.assertEqual(rindb.get('bar').data, 12345) - - def test_mark_as_started(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - tb.mark_as_started(tid) - self.assertEqual(tb.get_status(tid), states.STARTED) - - def test_mark_as_revoked(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - tb.mark_as_revoked(tid) - self.assertEqual(tb.get_status(tid), states.REVOKED) - - def test_mark_as_retry(self): - tb = DatabaseBackend(self.uri, app=self.app) - tid = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - import traceback - trace = '\n'.join(traceback.format_stack()) - tb.mark_as_retry(tid, exception, traceback=trace) - self.assertEqual(tb.get_status(tid), states.RETRY) - self.assertIsInstance(tb.get_result(tid), KeyError) - self.assertEqual(tb.get_traceback(tid), trace) - - def test_mark_as_failure(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid3 = uuid() - try: - raise KeyError('foo') - except KeyError as exception: - import traceback - trace = '\n'.join(traceback.format_stack()) - tb.mark_as_failure(tid3, exception, traceback=trace) - self.assertEqual(tb.get_status(tid3), states.FAILURE) - self.assertIsInstance(tb.get_result(tid3), KeyError) - self.assertEqual(tb.get_traceback(tid3), trace) - - def test_forget(self): - tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) - tid = uuid() - tb.mark_as_done(tid, {'foo': 'bar'}) - tb.mark_as_done(tid, {'foo': 'bar'}) - x = self.app.AsyncResult(tid, backend=tb) - x.forget() - self.assertIsNone(x.result) - - def test_process_cleanup(self): - tb = DatabaseBackend(self.uri, app=self.app) - tb.process_cleanup() - - @depends_on_current_app - def test_reduce(self): - tb = DatabaseBackend(self.uri, app=self.app) - self.assertTrue(loads(dumps(tb))) - - def test_save__restore__delete_group(self): - tb = DatabaseBackend(self.uri, app=self.app) - - tid = uuid() - res = {'something': 'special'} - self.assertEqual(tb.save_group(tid, res), res) - - res2 = tb.restore_group(tid) - self.assertEqual(res2, res) - - tb.delete_group(tid) - self.assertIsNone(tb.restore_group(tid)) - - self.assertIsNone(tb.restore_group('xxx-nonexisting-id')) - - def test_cleanup(self): - tb = DatabaseBackend(self.uri, app=self.app) - for i in range(10): - tb.mark_as_done(uuid(), 42) - tb.save_group(uuid(), {'foo': 'bar'}) - s = tb.ResultSession() - for t in s.query(Task).all(): - t.date_done = datetime.now() - tb.expires * 2 - for t in s.query(TaskSet).all(): - t.date_done = datetime.now() - tb.expires * 2 - s.commit() - s.close() - - tb.cleanup() - - def test_Task__repr__(self): - self.assertIn('foo', repr(Task('foo'))) - - def test_TaskSet__repr__(self): - self.assertIn('foo', repr(TaskSet('foo', None))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py deleted file mode 100644 index bce429f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py +++ /dev/null @@ -1,366 +0,0 @@ -from __future__ import absolute_import - -import datetime -import uuid - -from pickle import loads, dumps - -from celery import states -from celery.backends import mongodb as module -from celery.backends.mongodb import MongoBackend, pymongo -from celery.exceptions import ImproperlyConfigured -from celery.tests.case import ( - AppCase, MagicMock, Mock, SkipTest, ANY, - depends_on_current_app, 
disable_stdouts, patch, sentinel, -) - -COLLECTION = 'taskmeta_celery' -TASK_ID = str(uuid.uuid1()) -MONGODB_HOST = 'localhost' -MONGODB_PORT = 27017 -MONGODB_USER = 'mongo' -MONGODB_PASSWORD = '1234' -MONGODB_DATABASE = 'testing' -MONGODB_COLLECTION = 'collection1' - - -class test_MongoBackend(AppCase): - - default_url = 'mongodb://uuuu:pwpw@hostname.dom/database' - replica_set_url = ( - 'mongodb://uuuu:pwpw@hostname.dom,' - 'hostname.dom/database?replicaSet=rs' - ) - sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database' - sanitized_replica_set_url = ( - 'mongodb://uuuu:**@hostname.dom/,' - 'hostname.dom/database?replicaSet=rs' - ) - - def setup(self): - if pymongo is None: - raise SkipTest('pymongo is not installed.') - - R = self._reset = {} - R['encode'], MongoBackend.encode = MongoBackend.encode, Mock() - R['decode'], MongoBackend.decode = MongoBackend.decode, Mock() - R['Binary'], module.Binary = module.Binary, Mock() - R['datetime'], datetime.datetime = datetime.datetime, Mock() - - self.backend = MongoBackend(app=self.app, url=self.default_url) - - def teardown(self): - MongoBackend.encode = self._reset['encode'] - MongoBackend.decode = self._reset['decode'] - module.Binary = self._reset['Binary'] - datetime.datetime = self._reset['datetime'] - - def test_init_no_mongodb(self): - prev, module.pymongo = module.pymongo, None - try: - with self.assertRaises(ImproperlyConfigured): - MongoBackend(app=self.app) - finally: - module.pymongo = prev - - def test_init_no_settings(self): - self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = [] - with self.assertRaises(ImproperlyConfigured): - MongoBackend(app=self.app) - - def test_init_settings_is_None(self): - self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = None - MongoBackend(app=self.app) - - def test_restore_group_no_entry(self): - x = MongoBackend(app=self.app) - x.collection = Mock() - fo = x.collection.find_one = Mock() - fo.return_value = None - self.assertIsNone(x._restore_group('1f3fab')) - - @depends_on_current_app - def test_reduce(self): - x = MongoBackend(app=self.app) - self.assertTrue(loads(dumps(x))) - - def test_get_connection_connection_exists(self): - - with patch('pymongo.MongoClient') as mock_Connection: - self.backend._connection = sentinel._connection - - connection = self.backend._get_connection() - - self.assertEqual(sentinel._connection, connection) - self.assertFalse(mock_Connection.called) - - def test_get_connection_no_connection_host(self): - - with patch('pymongo.MongoClient') as mock_Connection: - self.backend._connection = None - self.backend.host = MONGODB_HOST - self.backend.port = MONGODB_PORT - mock_Connection.return_value = sentinel.connection - - connection = self.backend._get_connection() - mock_Connection.assert_called_once_with( - host='mongodb://localhost:27017', - **self.backend._prepare_client_options() - ) - self.assertEqual(sentinel.connection, connection) - - def test_get_connection_no_connection_mongodb_uri(self): - - with patch('pymongo.MongoClient') as mock_Connection: - mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) - self.backend._connection = None - self.backend.host = mongodb_uri - - mock_Connection.return_value = sentinel.connection - - connection = self.backend._get_connection() - mock_Connection.assert_called_once_with( - host=mongodb_uri, **self.backend._prepare_client_options() - ) - self.assertEqual(sentinel.connection, connection) - - @patch('celery.backends.mongodb.MongoBackend._get_connection') - def test_get_database_no_existing(self, 
mock_get_connection): - # Should really check for combinations of these two, to be complete. - self.backend.user = MONGODB_USER - self.backend.password = MONGODB_PASSWORD - - mock_database = Mock() - mock_connection = MagicMock(spec=['__getitem__']) - mock_connection.__getitem__.return_value = mock_database - mock_get_connection.return_value = mock_connection - - database = self.backend.database - - self.assertTrue(database is mock_database) - self.assertTrue(self.backend.__dict__['database'] is mock_database) - mock_database.authenticate.assert_called_once_with( - MONGODB_USER, MONGODB_PASSWORD) - - @patch('celery.backends.mongodb.MongoBackend._get_connection') - def test_get_database_no_existing_no_auth(self, mock_get_connection): - # Should really check for combinations of these two, to be complete. - self.backend.user = None - self.backend.password = None - - mock_database = Mock() - mock_connection = MagicMock(spec=['__getitem__']) - mock_connection.__getitem__.return_value = mock_database - mock_get_connection.return_value = mock_connection - - database = self.backend.database - - self.assertTrue(database is mock_database) - self.assertFalse(mock_database.authenticate.called) - self.assertTrue(self.backend.__dict__['database'] is mock_database) - - def test_process_cleanup(self): - self.backend._connection = None - self.backend.process_cleanup() - self.assertEqual(self.backend._connection, None) - - self.backend._connection = 'not none' - self.backend.process_cleanup() - self.assertEqual(self.backend._connection, None) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_store_result(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._store_result( - sentinel.task_id, sentinel.result, sentinel.status) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once_with(ANY) - self.assertEqual(sentinel.result, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_get_task_meta_for(self, mock_get_database): - datetime.datetime = self._reset['datetime'] - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = MagicMock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._get_task_meta_for(sentinel.task_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEqual( - list(sorted(['status', 'task_id', 'date_done', 'traceback', - 'result', 'children'])), - list(sorted(ret_val.keys())), - ) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_get_task_meta_for_no_result(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = None - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._get_task_meta_for(sentinel.task_id) - - 
mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEqual({'status': states.PENDING, 'result': None}, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_save_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._save_group( - sentinel.taskset_id, sentinel.result) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.save.assert_called_once_with(ANY) - self.assertEqual(sentinel.result, ret_val) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_restore_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - mock_collection.find_one.return_value = MagicMock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - ret_val = self.backend._restore_group(sentinel.taskset_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.find_one.assert_called_once_with( - {'_id': sentinel.taskset_id}) - self.assertItemsEqual( - ['date_done', 'result', 'task_id'], - list(ret_val.keys()), - ) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_delete_group(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend._delete_group(sentinel.taskset_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - mock_collection.remove.assert_called_once_with( - {'_id': sentinel.taskset_id}) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_forget(self, mock_get_database): - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend._forget(sentinel.task_id) - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with( - MONGODB_COLLECTION) - mock_collection.remove.assert_called_once_with( - {'_id': sentinel.task_id}) - - @patch('celery.backends.mongodb.MongoBackend._get_database') - def test_cleanup(self, mock_get_database): - datetime.datetime = self._reset['datetime'] - self.backend.taskmeta_collection = MONGODB_COLLECTION - - mock_database = MagicMock(spec=['__getitem__', '__setitem__']) - self.backend.collections = mock_collection = Mock() - - mock_get_database.return_value = mock_database - mock_database.__getitem__.return_value = mock_collection - - self.backend.app.now = datetime.datetime.utcnow - self.backend.cleanup() - - mock_get_database.assert_called_once_with() - mock_database.__getitem__.assert_called_once_with( - MONGODB_COLLECTION) - 
self.assertTrue(mock_collection.remove.called) - - def test_get_database_authfailure(self): - x = MongoBackend(app=self.app) - x._get_connection = Mock() - conn = x._get_connection.return_value = {} - db = conn[x.database_name] = Mock() - db.authenticate.return_value = False - x.user = 'jerry' - x.password = 'cere4l' - with self.assertRaises(ImproperlyConfigured): - x._get_database() - db.authenticate.assert_called_with('jerry', 'cere4l') - - @patch('celery.backends.mongodb.detect_environment') - def test_prepare_client_options_for_ver_2(self, m_detect_env): - m_detect_env.return_value = 'default' - with patch('pymongo.version_tuple', new=(2, 6, 3)): - options = self.backend._prepare_client_options() - self.assertDictEqual(options, { - 'max_pool_size': self.backend.max_pool_size, - 'auto_start_request': False - }) - - def test_as_uri_include_password(self): - self.assertEqual(self.backend.as_uri(True), self.default_url) - - def test_as_uri_exclude_password(self): - self.assertEqual(self.backend.as_uri(), self.sanitized_default_url) - - def test_as_uri_include_password_replica_set(self): - backend = MongoBackend(app=self.app, url=self.replica_set_url) - self.assertEqual(backend.as_uri(True), self.replica_set_url) - - def test_as_uri_exclude_password_replica_set(self): - backend = MongoBackend(app=self.app, url=self.replica_set_url) - self.assertEqual(backend.as_uri(), self.sanitized_replica_set_url) - - @disable_stdouts - def test_regression_worker_startup_info(self): - self.app.conf.result_backend = ( - 'mongodb://user:password@host0.com:43437,host1.com:43437' - '/work4us?replicaSet=rs&ssl=true' - ) - worker = self.app.Worker() - worker.on_start() - self.assertTrue(worker.startup_info()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py deleted file mode 100644 index a0de4b7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py +++ /dev/null @@ -1,282 +0,0 @@ -from __future__ import absolute_import - -from datetime import timedelta - -from pickle import loads, dumps - -from celery import signature -from celery import states -from celery import group -from celery import uuid -from celery.datastructures import AttributeDict -from celery.exceptions import ImproperlyConfigured -from celery.utils.timeutils import timedelta_seconds - -from celery.tests.case import ( - AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch, -) - - -class Connection(object): - connected = True - - def disconnect(self): - self.connected = False - - -class Pipeline(object): - - def __init__(self, client): - self.client = client - self.steps = [] - - def __getattr__(self, attr): - - def add_step(*args, **kwargs): - self.steps.append((getattr(self.client, attr), args, kwargs)) - return self - return add_step - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - pass - - def execute(self): - return [step(*a, **kw) for step, a, kw in self.steps] - - -class Redis(MockCallbacks): - Connection = Connection - Pipeline = Pipeline - - def __init__(self, host=None, port=None, db=None, password=None, **kw): - self.host = host - self.port = port - self.db = db - self.password = password - self.keyspace = {} - self.expiry = {} - self.connection = self.Connection() - - def get(self, key): - return self.keyspace.get(key) - - def setex(self, key, value, expires): - self.set(key, value) - self.expire(key, expires) - - def set(self, key, 
value): - self.keyspace[key] = value - - def expire(self, key, expires): - self.expiry[key] = expires - return expires - - def delete(self, key): - return bool(self.keyspace.pop(key, None)) - - def pipeline(self): - return self.Pipeline(self) - - def _get_list(self, key): - try: - return self.keyspace[key] - except KeyError: - l = self.keyspace[key] = [] - return l - - def rpush(self, key, value): - self._get_list(key).append(value) - - def lrange(self, key, start, stop): - return self._get_list(key)[start:stop] - - def llen(self, key): - return len(self.keyspace.get(key) or []) - - -class redis(object): - VERSION = (2, 4, 10) - Redis = Redis - - class ConnectionPool(object): - - def __init__(self, **kwargs): - pass - - class UnixDomainSocketConnection(object): - - def __init__(self, **kwargs): - pass - - -class test_RedisBackend(AppCase): - - def get_backend(self): - from celery.backends.redis import RedisBackend - - class _RedisBackend(RedisBackend): - redis = redis - - return _RedisBackend - - def setup(self): - self.Backend = self.get_backend() - - @depends_on_current_app - def test_reduce(self): - try: - from celery.backends.redis import RedisBackend - x = RedisBackend(app=self.app, new_join=True) - self.assertTrue(loads(dumps(x))) - except ImportError: - raise SkipTest('redis not installed') - - def test_no_redis(self): - self.Backend.redis = None - with self.assertRaises(ImproperlyConfigured): - self.Backend(app=self.app, new_join=True) - - def test_url(self): - x = self.Backend( - 'redis://:bosco@vandelay.com:123//1', app=self.app, - new_join=True, - ) - self.assertTrue(x.connparams) - self.assertEqual(x.connparams['host'], 'vandelay.com') - self.assertEqual(x.connparams['db'], 1) - self.assertEqual(x.connparams['port'], 123) - self.assertEqual(x.connparams['password'], 'bosco') - - def test_socket_url(self): - x = self.Backend( - 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, - new_join=True, - ) - self.assertTrue(x.connparams) - self.assertEqual(x.connparams['path'], '/tmp/redis.sock') - self.assertIs( - x.connparams['connection_class'], - redis.UnixDomainSocketConnection, - ) - self.assertNotIn('host', x.connparams) - self.assertNotIn('port', x.connparams) - self.assertEqual(x.connparams['db'], 3) - - def test_compat_properties(self): - x = self.Backend( - 'redis://:bosco@vandelay.com:123//1', app=self.app, - new_join=True, - ) - with self.assertPendingDeprecation(): - self.assertEqual(x.host, 'vandelay.com') - with self.assertPendingDeprecation(): - self.assertEqual(x.db, 1) - with self.assertPendingDeprecation(): - self.assertEqual(x.port, 123) - with self.assertPendingDeprecation(): - self.assertEqual(x.password, 'bosco') - - def test_conf_raises_KeyError(self): - self.app.conf = AttributeDict({ - 'CELERY_RESULT_SERIALIZER': 'json', - 'CELERY_MAX_CACHED_RESULTS': 1, - 'CELERY_ACCEPT_CONTENT': ['json'], - 'CELERY_TASK_RESULT_EXPIRES': None, - }) - self.Backend(app=self.app, new_join=True) - - def test_expires_defaults_to_config(self): - self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10 - b = self.Backend(expires=None, app=self.app, new_join=True) - self.assertEqual(b.expires, 10) - - def test_expires_is_int(self): - b = self.Backend(expires=48, app=self.app, new_join=True) - self.assertEqual(b.expires, 48) - - def test_set_new_join_from_url_query(self): - b = self.Backend('redis://?new_join=True;foobar=1', app=self.app) - self.assertEqual(b.on_chord_part_return, b._new_chord_return) - self.assertEqual(b.apply_chord, b._new_chord_apply) - - def 
test_default_is_old_join(self): - b = self.Backend(app=self.app) - self.assertNotEqual(b.on_chord_part_return, b._new_chord_return) - self.assertNotEqual(b.apply_chord, b._new_chord_apply) - - def test_expires_is_None(self): - b = self.Backend(expires=None, app=self.app, new_join=True) - self.assertEqual(b.expires, timedelta_seconds( - self.app.conf.CELERY_TASK_RESULT_EXPIRES)) - - def test_expires_is_timedelta(self): - b = self.Backend( - expires=timedelta(minutes=1), app=self.app, new_join=1, - ) - self.assertEqual(b.expires, 60) - - def test_apply_chord(self): - self.Backend(app=self.app, new_join=True).apply_chord( - group(app=self.app), (), 'group_id', {}, - result=[self.app.AsyncResult(x) for x in [1, 2, 3]], - ) - - def test_mget(self): - b = self.Backend(app=self.app, new_join=True) - self.assertTrue(b.mget(['a', 'b', 'c'])) - b.client.mget.assert_called_with(['a', 'b', 'c']) - - def test_set_no_expire(self): - b = self.Backend(app=self.app, new_join=True) - b.expires = None - b.set('foo', 'bar') - - @patch('celery.result.GroupResult.restore') - def test_on_chord_part_return(self, restore): - b = self.Backend(app=self.app, new_join=True) - - def create_task(): - tid = uuid() - task = Mock(name='task-{0}'.format(tid)) - task.name = 'foobarbaz' - self.app.tasks['foobarbaz'] = task - task.request.chord = signature(task) - task.request.id = tid - task.request.chord['chord_size'] = 10 - task.request.group = 'group_id' - return task - - tasks = [create_task() for i in range(10)] - - for i in range(10): - b.on_chord_part_return(tasks[i], states.SUCCESS, i) - self.assertTrue(b.client.rpush.call_count) - b.client.rpush.reset_mock() - self.assertTrue(b.client.lrange.call_count) - gkey = b.get_key_for_group('group_id', '.j') - b.client.delete.assert_called_with(gkey) - b.client.expire.assert_called_with(gkey, 86400) - - def test_process_cleanup(self): - self.Backend(app=self.app, new_join=True).process_cleanup() - - def test_get_set_forget(self): - b = self.Backend(app=self.app, new_join=True) - tid = uuid() - b.store_result(tid, 42, states.SUCCESS) - self.assertEqual(b.get_status(tid), states.SUCCESS) - self.assertEqual(b.get_result(tid), 42) - b.forget(tid) - self.assertEqual(b.get_status(tid), states.PENDING) - - def test_set_expires(self): - b = self.Backend(expires=512, app=self.app, new_join=True) - tid = uuid() - key = b.get_key_for_task(tid) - b.store_result(tid, 42, states.SUCCESS) - b.client.expire.assert_called_with( - key, 512, - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py deleted file mode 100644 index 6fe594c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import absolute_import - -from celery.backends.rpc import RPCBackend -from celery._state import _task_stack - -from celery.tests.case import AppCase, Mock, patch - - -class test_RPCBackend(AppCase): - - def setup(self): - self.b = RPCBackend(app=self.app) - - def test_oid(self): - oid = self.b.oid - oid2 = self.b.oid - self.assertEqual(oid, oid2) - self.assertEqual(oid, self.app.oid) - - def test_interface(self): - self.b.on_reply_declare('task_id') - - def test_destination_for(self): - req = Mock(name='request') - req.reply_to = 'reply_to' - req.correlation_id = 'corid' - self.assertTupleEqual( - self.b.destination_for('task_id', req), - ('reply_to', 'corid'), - ) - task = Mock() - _task_stack.push(task) - try: - 
task.request.reply_to = 'reply_to' - task.request.correlation_id = 'corid' - self.assertTupleEqual( - self.b.destination_for('task_id', None), - ('reply_to', 'corid'), - ) - finally: - _task_stack.pop() - - with self.assertRaises(RuntimeError): - self.b.destination_for('task_id', None) - - def test_binding(self): - queue = self.b.binding - self.assertEqual(queue.name, self.b.oid) - self.assertEqual(queue.exchange, self.b.exchange) - self.assertEqual(queue.routing_key, self.b.oid) - self.assertFalse(queue.durable) - self.assertFalse(queue.auto_delete) - - def test_many_bindings(self): - self.assertListEqual( - self.b._many_bindings(['a', 'b']), - [self.b.binding], - ) - - def test_create_binding(self): - self.assertEqual(self.b._create_binding('id'), self.b.binding) - - def test_on_task_call(self): - with patch('celery.backends.rpc.maybe_declare') as md: - with self.app.amqp.producer_pool.acquire() as prod: - self.b.on_task_call(prod, 'task_id'), - md.assert_called_with( - self.b.binding(prod.channel), - retry=True, - ) - - def test_create_exchange(self): - ex = self.b._create_exchange('name') - self.assertIsInstance(ex, self.b.Exchange) - self.assertEqual(ex.name, '') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py deleted file mode 100644 index ffe8fb0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from celery import Celery - -hello = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py deleted file mode 100644 index f1fb15e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from celery import Celery - -app = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py deleted file mode 100644 index 8840a9f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py +++ /dev/null @@ -1,153 +0,0 @@ -from __future__ import absolute_import - -from celery.bin.amqp import ( - AMQPAdmin, - AMQShell, - dump_message, - amqp, - main, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, patch - - -class test_AMQShell(AppCase): - - def setup(self): - self.fh = WhateverIO() - self.adm = self.create_adm() - self.shell = AMQShell(connect=self.adm.connect, out=self.fh) - - def create_adm(self, *args, **kwargs): - return AMQPAdmin(app=self.app, out=self.fh, *args, **kwargs) - - def test_queue_declare(self): - self.shell.onecmd('queue.declare foo') - self.assertIn('ok', self.fh.getvalue()) - - def test_missing_command(self): - self.shell.onecmd('foo foo') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def RV(self): - raise Exception(self.fh.getvalue()) - - def test_spec_format_response(self): - spec = self.shell.amqp['exchange.declare'] - self.assertEqual(spec.format_response(None), 'ok.') - self.assertEqual(spec.format_response('NO'), 'NO') - - def test_missing_namespace(self): - self.shell.onecmd('ns.cmd arg') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def test_help(self): - self.shell.onecmd('help') - self.assertIn('Example:', self.fh.getvalue()) - - def test_help_command(self): - 
self.shell.onecmd('help queue.declare') - self.assertIn('passive:no', self.fh.getvalue()) - - def test_help_unknown_command(self): - self.shell.onecmd('help foo.baz') - self.assertIn('unknown syntax', self.fh.getvalue()) - - def test_onecmd_error(self): - self.shell.dispatch = Mock() - self.shell.dispatch.side_effect = MemoryError() - self.shell.say = Mock() - self.assertFalse(self.shell.needs_reconnect) - self.shell.onecmd('hello') - self.assertTrue(self.shell.say.called) - self.assertTrue(self.shell.needs_reconnect) - - def test_exit(self): - with self.assertRaises(SystemExit): - self.shell.onecmd('exit') - self.assertIn("don't leave!", self.fh.getvalue()) - - def test_note_silent(self): - self.shell.silent = True - self.shell.note('foo bar') - self.assertNotIn('foo bar', self.fh.getvalue()) - - def test_reconnect(self): - self.shell.onecmd('queue.declare foo') - self.shell.needs_reconnect = True - self.shell.onecmd('queue.delete foo') - - def test_completenames(self): - self.assertEqual( - self.shell.completenames('queue.dec'), - ['queue.declare'], - ) - self.assertEqual( - sorted(self.shell.completenames('declare')), - sorted(['queue.declare', 'exchange.declare']), - ) - - def test_empty_line(self): - self.shell.emptyline = Mock() - self.shell.default = Mock() - self.shell.onecmd('') - self.shell.emptyline.assert_called_with() - self.shell.onecmd('foo') - self.shell.default.assert_called_with('foo') - - def test_respond(self): - self.shell.respond({'foo': 'bar'}) - self.assertIn('foo', self.fh.getvalue()) - - def test_prompt(self): - self.assertTrue(self.shell.prompt) - - def test_no_returns(self): - self.shell.onecmd('queue.declare foo') - self.shell.onecmd('exchange.declare bar direct yes') - self.shell.onecmd('queue.bind foo bar baz') - self.shell.onecmd('basic.ack 1') - - def test_dump_message(self): - m = Mock() - m.body = 'the quick brown fox' - m.properties = {'a': 1} - m.delivery_info = {'exchange': 'bar'} - self.assertTrue(dump_message(m)) - - def test_dump_message_no_message(self): - self.assertIn('No messages in queue', dump_message(None)) - - def test_note(self): - self.adm.silent = True - self.adm.note('FOO') - self.assertNotIn('FOO', self.fh.getvalue()) - - def test_run(self): - a = self.create_adm('queue.declare foo') - a.run() - self.assertIn('ok', self.fh.getvalue()) - - def test_run_loop(self): - a = self.create_adm() - a.Shell = Mock() - shell = a.Shell.return_value = Mock() - shell.cmdloop = Mock() - a.run() - shell.cmdloop.assert_called_with() - - shell.cmdloop.side_effect = KeyboardInterrupt() - a.run() - self.assertIn('bibi', self.fh.getvalue()) - - @patch('celery.bin.amqp.amqp') - def test_main(self, Command): - c = Command.return_value = Mock() - main() - c.execute_from_commandline.assert_called_with() - - @patch('celery.bin.amqp.AMQPAdmin') - def test_command(self, cls): - x = amqp(app=self.app) - x.run() - self.assertIs(cls.call_args[1]['app'], self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py deleted file mode 100644 index 61d56fe..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py +++ /dev/null @@ -1,332 +0,0 @@ -from __future__ import absolute_import - -import os - -from celery.bin.base import ( - Command, - Option, - Extensions, - HelpFormatter, -) -from celery.tests.case import ( - AppCase, Mock, depends_on_current_app, override_stdouts, patch, -) - - -class Object(object): - pass - - -class MyApp(object): - 
user_options = {'preload': None} - -APP = MyApp() # <-- Used by test_with_custom_app - - -class MockCommand(Command): - mock_args = ('arg1', 'arg2', 'arg3') - - def parse_options(self, prog_name, arguments, command=None): - options = Object() - options.foo = 'bar' - options.prog_name = prog_name - return options, self.mock_args - - def run(self, *args, **kwargs): - return args, kwargs - - -class test_Extensions(AppCase): - - def test_load(self): - with patch('pkg_resources.iter_entry_points') as iterep: - with patch('celery.bin.base.symbol_by_name') as symbyname: - ep = Mock() - ep.name = 'ep' - ep.module_name = 'foo' - ep.attrs = ['bar', 'baz'] - iterep.return_value = [ep] - cls = symbyname.return_value = Mock() - register = Mock() - e = Extensions('unit', register) - e.load() - symbyname.assert_called_with('foo:bar') - register.assert_called_with(cls, name='ep') - - with patch('celery.bin.base.symbol_by_name') as symbyname: - symbyname.side_effect = SyntaxError() - with patch('warnings.warn') as warn: - e.load() - self.assertTrue(warn.called) - - with patch('celery.bin.base.symbol_by_name') as symbyname: - symbyname.side_effect = KeyError('foo') - with self.assertRaises(KeyError): - e.load() - - -class test_HelpFormatter(AppCase): - - def test_format_epilog(self): - f = HelpFormatter() - self.assertTrue(f.format_epilog('hello')) - self.assertFalse(f.format_epilog('')) - - def test_format_description(self): - f = HelpFormatter() - self.assertTrue(f.format_description('hello')) - - -class test_Command(AppCase): - - def test_get_options(self): - cmd = Command() - cmd.option_list = (1, 2, 3) - self.assertTupleEqual(cmd.get_options(), (1, 2, 3)) - - def test_custom_description(self): - - class C(Command): - description = 'foo' - - c = C() - self.assertEqual(c.description, 'foo') - - def test_register_callbacks(self): - c = Command(on_error=8, on_usage_error=9) - self.assertEqual(c.on_error, 8) - self.assertEqual(c.on_usage_error, 9) - - def test_run_raises_UsageError(self): - cb = Mock() - c = Command(on_usage_error=cb) - c.verify_args = Mock() - c.run = Mock() - exc = c.run.side_effect = c.UsageError('foo', status=3) - - self.assertEqual(c(), exc.status) - cb.assert_called_with(exc) - c.verify_args.assert_called_with(()) - - def test_default_on_usage_error(self): - cmd = Command() - cmd.handle_error = Mock() - exc = Exception() - cmd.on_usage_error(exc) - cmd.handle_error.assert_called_with(exc) - - def test_verify_args_missing(self): - c = Command() - - def run(a, b, c): - pass - c.run = run - - with self.assertRaises(c.UsageError): - c.verify_args((1, )) - c.verify_args((1, 2, 3)) - - def test_run_interface(self): - with self.assertRaises(NotImplementedError): - Command().run() - - @patch('sys.stdout') - def test_early_version(self, stdout): - cmd = Command() - with self.assertRaises(SystemExit): - cmd.early_version(['--version']) - - def test_execute_from_commandline(self): - cmd = MockCommand(app=self.app) - args1, kwargs1 = cmd.execute_from_commandline() # sys.argv - self.assertTupleEqual(args1, cmd.mock_args) - self.assertDictContainsSubset({'foo': 'bar'}, kwargs1) - self.assertTrue(kwargs1.get('prog_name')) - args2, kwargs2 = cmd.execute_from_commandline(['foo']) # pass list - self.assertTupleEqual(args2, cmd.mock_args) - self.assertDictContainsSubset({'foo': 'bar', 'prog_name': 'foo'}, - kwargs2) - - def test_with_bogus_args(self): - with override_stdouts() as (_, stderr): - cmd = MockCommand(app=self.app) - cmd.supports_args = False - with self.assertRaises(SystemExit): - 
cmd.execute_from_commandline(argv=['--bogus']) - self.assertTrue(stderr.getvalue()) - self.assertIn('Unrecognized', stderr.getvalue()) - - def test_with_custom_config_module(self): - prev = os.environ.pop('CELERY_CONFIG_MODULE', None) - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--config=foo.bar.baz']) - self.assertEqual(os.environ.get('CELERY_CONFIG_MODULE'), - 'foo.bar.baz') - finally: - if prev: - os.environ['CELERY_CONFIG_MODULE'] = prev - else: - os.environ.pop('CELERY_CONFIG_MODULE', None) - - def test_with_custom_broker(self): - prev = os.environ.pop('CELERY_BROKER_URL', None) - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--broker=xyzza://']) - self.assertEqual( - os.environ.get('CELERY_BROKER_URL'), 'xyzza://', - ) - finally: - if prev: - os.environ['CELERY_BROKER_URL'] = prev - else: - os.environ.pop('CELERY_BROKER_URL', None) - - def test_with_custom_app(self): - cmd = MockCommand(app=self.app) - app = '.'.join([__name__, 'APP']) - cmd.setup_app_from_commandline(['--app=%s' % (app, ), - '--loglevel=INFO']) - self.assertIs(cmd.app, APP) - cmd.setup_app_from_commandline(['-A', app, - '--loglevel=INFO']) - self.assertIs(cmd.app, APP) - - def test_setup_app_sets_quiet(self): - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['-q']) - self.assertTrue(cmd.quiet) - cmd2 = MockCommand(app=self.app) - cmd2.setup_app_from_commandline(['--quiet']) - self.assertTrue(cmd2.quiet) - - def test_setup_app_sets_chdir(self): - with patch('os.chdir') as chdir: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--workdir=/opt']) - chdir.assert_called_with('/opt') - - def test_setup_app_sets_loader(self): - prev = os.environ.get('CELERY_LOADER') - try: - cmd = MockCommand(app=self.app) - cmd.setup_app_from_commandline(['--loader=X.Y:Z']) - self.assertEqual(os.environ['CELERY_LOADER'], 'X.Y:Z') - finally: - if prev is not None: - os.environ['CELERY_LOADER'] = prev - - def test_setup_app_no_respect(self): - cmd = MockCommand(app=self.app) - cmd.respects_app_option = False - with patch('celery.bin.base.Celery') as cp: - cmd.setup_app_from_commandline(['--app=x.y:z']) - self.assertTrue(cp.called) - - def test_setup_app_custom_app(self): - cmd = MockCommand(app=self.app) - app = cmd.app = Mock() - app.user_options = {'preload': None} - cmd.setup_app_from_commandline([]) - self.assertEqual(cmd.app, app) - - def test_find_app_suspects(self): - cmd = MockCommand(app=self.app) - self.assertTrue(cmd.find_app('celery.tests.bin.proj.app')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj:hello')) - self.assertTrue(cmd.find_app('celery.tests.bin.proj.app:app')) - - with self.assertRaises(AttributeError): - cmd.find_app(__name__) - - def test_host_format(self): - cmd = MockCommand(app=self.app) - with patch('socket.gethostname') as hn: - hn.return_value = 'blacktron.example.com' - self.assertEqual(cmd.host_format(''), '') - self.assertEqual( - cmd.host_format('celery@%h'), - 'celery@blacktron.example.com', - ) - self.assertEqual( - cmd.host_format('celery@%d'), - 'celery@example.com', - ) - self.assertEqual( - cmd.host_format('celery@%n'), - 'celery@blacktron', - ) - - def test_say_chat_quiet(self): - cmd = MockCommand(app=self.app) - cmd.quiet = True - self.assertIsNone(cmd.say_chat('<-', 'foo', 'foo')) - - def test_say_chat_show_body(self): - cmd = MockCommand(app=self.app) - cmd.out = Mock() - cmd.show_body = True - cmd.say_chat('->', 'foo', 'body') - 
cmd.out.assert_called_with('body') - - def test_say_chat_no_body(self): - cmd = MockCommand(app=self.app) - cmd.out = Mock() - cmd.show_body = False - cmd.say_chat('->', 'foo', 'body') - - @depends_on_current_app - def test_with_cmdline_config(self): - cmd = MockCommand(app=self.app) - cmd.enable_config_from_cmdline = True - cmd.namespace = 'celeryd' - rest = cmd.setup_app_from_commandline(argv=[ - '--loglevel=INFO', '--', - 'broker.url=amqp://broker.example.com', - '.prefetch_multiplier=100']) - self.assertEqual(cmd.app.conf.BROKER_URL, - 'amqp://broker.example.com') - self.assertEqual(cmd.app.conf.CELERYD_PREFETCH_MULTIPLIER, 100) - self.assertListEqual(rest, ['--loglevel=INFO']) - - def test_find_app(self): - cmd = MockCommand(app=self.app) - with patch('celery.bin.base.symbol_by_name') as sbn: - from types import ModuleType - x = ModuleType('proj') - - def on_sbn(*args, **kwargs): - - def after(*args, **kwargs): - x.app = 'quick brown fox' - x.__path__ = None - return x - sbn.side_effect = after - return x - sbn.side_effect = on_sbn - x.__path__ = [True] - self.assertEqual(cmd.find_app('proj'), 'quick brown fox') - - def test_parse_preload_options_shortopt(self): - cmd = Command() - cmd.preload_options = (Option('-s', action='store', dest='silent'), ) - acc = cmd.parse_preload_options(['-s', 'yes']) - self.assertEqual(acc.get('silent'), 'yes') - - def test_parse_preload_options_with_equals_and_append(self): - cmd = Command() - opt = Option('--zoom', action='append', default=[]) - cmd.preload_options = (opt,) - acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2']) - - self.assertEqual(acc, {'zoom': ['1', '2']}) - - def test_parse_preload_options_without_equals_and_append(self): - cmd = Command() - opt = Option('--zoom', action='append', default=[]) - cmd.preload_options = (opt,) - acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2']) - - self.assertEqual(acc, {'zoom': ['1', '2']}) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py deleted file mode 100644 index 45a7438..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py +++ /dev/null @@ -1,196 +0,0 @@ -from __future__ import absolute_import - -import logging -import sys - -from collections import defaultdict - -from celery import beat -from celery import platforms -from celery.bin import beat as beat_bin -from celery.apps import beat as beatapp - -from celery.tests.case import AppCase, Mock, patch, restore_logging -from kombu.tests.case import redirect_stdouts - - -class MockedShelveModule(object): - shelves = defaultdict(lambda: {}) - - def open(self, filename, *args, **kwargs): - return self.shelves[filename] -mocked_shelve = MockedShelveModule() - - -class MockService(beat.Service): - started = False - in_sync = False - persistence = mocked_shelve - - def start(self): - self.__class__.started = True - - def sync(self): - self.__class__.in_sync = True - - -class MockBeat(beatapp.Beat): - running = False - - def run(self): - MockBeat.running = True - - -class MockBeat2(beatapp.Beat): - Service = MockService - - def install_sync_handler(self, b): - pass - - -class MockBeat3(beatapp.Beat): - Service = MockService - - def install_sync_handler(self, b): - raise TypeError('xxx') - - -class test_Beat(AppCase): - - def test_loglevel_string(self): - b = beatapp.Beat(app=self.app, loglevel='DEBUG', - redirect_stdouts=False) - self.assertEqual(b.loglevel, logging.DEBUG) - - b2 = 
beatapp.Beat(app=self.app, loglevel=logging.DEBUG, - redirect_stdouts=False) - self.assertEqual(b2.loglevel, logging.DEBUG) - - def test_colorize(self): - self.app.log.setup = Mock() - b = beatapp.Beat(app=self.app, no_color=True, - redirect_stdouts=False) - b.setup_logging() - self.assertTrue(self.app.log.setup.called) - self.assertEqual(self.app.log.setup.call_args[1]['colorize'], False) - - def test_init_loader(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.init_loader() - - def test_process_title(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.set_process_title() - - def test_run(self): - b = MockBeat2(app=self.app, redirect_stdouts=False) - MockService.started = False - b.run() - self.assertTrue(MockService.started) - - def psig(self, fun, *args, **kwargs): - handlers = {} - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): - handlers[sig] = handler - - p, platforms.signals = platforms.signals, Signals() - try: - fun(*args, **kwargs) - return handlers - finally: - platforms.signals = p - - def test_install_sync_handler(self): - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - clock = MockService(app=self.app) - MockService.in_sync = False - handlers = self.psig(b.install_sync_handler, clock) - with self.assertRaises(SystemExit): - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(MockService.in_sync) - MockService.in_sync = False - - def test_setup_logging(self): - with restore_logging(): - try: - # py3k - delattr(sys.stdout, 'logger') - except AttributeError: - pass - b = beatapp.Beat(app=self.app, redirect_stdouts=False) - b.redirect_stdouts = False - b.app.log.already_setup = False - b.setup_logging() - with self.assertRaises(AttributeError): - sys.stdout.logger - - @redirect_stdouts - @patch('celery.apps.beat.logger') - def test_logs_errors(self, logger, stdout, stderr): - with restore_logging(): - b = MockBeat3( - app=self.app, redirect_stdouts=False, socket_timeout=None, - ) - b.start_scheduler() - self.assertTrue(logger.critical.called) - - @redirect_stdouts - @patch('celery.platforms.create_pidlock') - def test_use_pidfile(self, create_pidlock, stdout, stderr): - b = MockBeat2(app=self.app, pidfile='pidfilelockfilepid', - socket_timeout=None, redirect_stdouts=False) - b.start_scheduler() - self.assertTrue(create_pidlock.called) - - -class MockDaemonContext(object): - opened = False - closed = False - - def __init__(self, *args, **kwargs): - pass - - def open(self): - self.__class__.opened = True - return self - __enter__ = open - - def close(self, *args): - self.__class__.closed = True - __exit__ = close - - -class test_div(AppCase): - - def setup(self): - self.prev, beatapp.Beat = beatapp.Beat, MockBeat - self.ctx, beat_bin.detached = ( - beat_bin.detached, MockDaemonContext, - ) - - def teardown(self): - beatapp.Beat = self.prev - - def test_main(self): - sys.argv = [sys.argv[0], '-s', 'foo'] - try: - beat_bin.main(app=self.app) - self.assertTrue(MockBeat.running) - finally: - MockBeat.running = False - - def test_detach(self): - cmd = beat_bin.beat() - cmd.app = self.app - cmd.run(detach=True) - self.assertTrue(MockDaemonContext.opened) - self.assertTrue(MockDaemonContext.closed) - - def test_parse_options(self): - cmd = beat_bin.beat() - cmd.app = self.app - options, args = cmd.parse_options('celery beat', ['-s', 'foo']) - self.assertEqual(options.schedule, 'foo') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py deleted file mode 100644 index fbfdb62..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py +++ /dev/null @@ -1,588 +0,0 @@ -from __future__ import absolute_import - -import sys - -from anyjson import dumps -from datetime import datetime - -from celery import __main__ -from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK -from celery.bin.base import Error -from celery.bin.celery import ( - Command, - list_, - call, - purge, - result, - inspect, - control, - status, - migrate, - help, - report, - CeleryCommand, - determine_exit_status, - multi, - main as mainfun, - _RemoteControl, - command, -) - -from celery.tests.case import ( - AppCase, Mock, WhateverIO, override_stdouts, patch, -) - - -class test__main__(AppCase): - - def test_warn_deprecated(self): - with override_stdouts() as (stdout, _): - __main__._warn_deprecated('YADDA YADDA') - self.assertIn('command is deprecated', stdout.getvalue()) - self.assertIn('YADDA YADDA', stdout.getvalue()) - - def test_main(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.bin.celery.main') as main: - __main__.main() - mpc.assert_called_with() - main.assert_called_with() - - def test_compat_worker(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.worker.main') as main: - __main__._compat_worker() - mpc.assert_called_with() - depr.assert_called_with('celery worker') - main.assert_called_with() - - def test_compat_multi(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.multi.main') as main: - __main__._compat_multi() - self.assertFalse(mpc.called) - depr.assert_called_with('celery multi') - main.assert_called_with() - - def test_compat_beat(self): - with patch('celery.__main__.maybe_patch_concurrency') as mpc: - with patch('celery.__main__._warn_deprecated') as depr: - with patch('celery.bin.beat.main') as main: - __main__._compat_beat() - mpc.assert_called_with() - depr.assert_called_with('celery beat') - main.assert_called_with() - - -class test_Command(AppCase): - - def test_Error_repr(self): - x = Error('something happened') - self.assertIsNotNone(x.status) - self.assertTrue(x.reason) - self.assertTrue(str(x)) - - def setup(self): - self.out = WhateverIO() - self.err = WhateverIO() - self.cmd = Command(self.app, stdout=self.out, stderr=self.err) - - def test_error(self): - self.cmd.out = Mock() - self.cmd.error('FOO') - self.assertTrue(self.cmd.out.called) - - def test_out(self): - f = Mock() - self.cmd.out('foo', f) - - def test_call(self): - - def ok_run(): - pass - - self.cmd.run = ok_run - self.assertEqual(self.cmd(), EX_OK) - - def error_run(): - raise Error('error', EX_FAILURE) - self.cmd.run = error_run - self.assertEqual(self.cmd(), EX_FAILURE) - - def test_run_from_argv(self): - with self.assertRaises(NotImplementedError): - self.cmd.run_from_argv('prog', ['foo', 'bar']) - - def test_pretty_list(self): - self.assertEqual(self.cmd.pretty([])[1], '- empty -') - self.assertIn('bar', self.cmd.pretty(['foo', 'bar'])[1]) - - def test_pretty_dict(self): - self.assertIn( - 'OK', - str(self.cmd.pretty({'ok': 'the quick brown fox'})[0]), - ) - self.assertIn( - 'ERROR', - str(self.cmd.pretty({'error': 'the quick brown fox'})[0]), - ) - - def test_pretty(self): - self.assertIn('OK', 
str(self.cmd.pretty('the quick brown'))) - self.assertIn('OK', str(self.cmd.pretty(object()))) - self.assertIn('OK', str(self.cmd.pretty({'foo': 'bar'}))) - - -class test_list(AppCase): - - def test_list_bindings_no_support(self): - l = list_(app=self.app, stderr=WhateverIO()) - management = Mock() - management.get_bindings.side_effect = NotImplementedError() - with self.assertRaises(Error): - l.list_bindings(management) - - def test_run(self): - l = list_(app=self.app, stderr=WhateverIO()) - l.run('bindings') - - with self.assertRaises(Error): - l.run(None) - - with self.assertRaises(Error): - l.run('foo') - - -class test_call(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @patch('celery.app.base.Celery.send_task') - def test_run(self, send_task): - a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO()) - a.run(self.add.name) - self.assertTrue(send_task.called) - - a.run(self.add.name, - args=dumps([4, 4]), - kwargs=dumps({'x': 2, 'y': 2})) - self.assertEqual(send_task.call_args[1]['args'], [4, 4]) - self.assertEqual(send_task.call_args[1]['kwargs'], {'x': 2, 'y': 2}) - - a.run(self.add.name, expires=10, countdown=10) - self.assertEqual(send_task.call_args[1]['expires'], 10) - self.assertEqual(send_task.call_args[1]['countdown'], 10) - - now = datetime.now() - iso = now.isoformat() - a.run(self.add.name, expires=iso) - self.assertEqual(send_task.call_args[1]['expires'], now) - with self.assertRaises(ValueError): - a.run(self.add.name, expires='foobaribazibar') - - -class test_purge(AppCase): - - @patch('celery.app.control.Control.purge') - def test_run(self, purge_): - out = WhateverIO() - a = purge(app=self.app, stdout=out) - purge_.return_value = 0 - a.run(force=True) - self.assertIn('No messages purged', out.getvalue()) - - purge_.return_value = 100 - a.run(force=True) - self.assertIn('100 messages', out.getvalue()) - - -class test_result(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - def test_run(self): - with patch('celery.result.AsyncResult.get') as get: - out = WhateverIO() - r = result(app=self.app, stdout=out) - get.return_value = 'Jerry' - r.run('id') - self.assertIn('Jerry', out.getvalue()) - - get.return_value = 'Elaine' - r.run('id', task=self.add.name) - self.assertIn('Elaine', out.getvalue()) - - with patch('celery.result.AsyncResult.traceback') as tb: - r.run('id', task=self.add.name, traceback=True) - self.assertIn(str(tb), out.getvalue()) - - -class test_status(AppCase): - - @patch('celery.bin.celery.inspect') - def test_run(self, inspect_): - out, err = WhateverIO(), WhateverIO() - ins = inspect_.return_value = Mock() - ins.run.return_value = [] - s = status(self.app, stdout=out, stderr=err) - with self.assertRaises(Error): - s.run() - - ins.run.return_value = ['a', 'b', 'c'] - s.run() - self.assertIn('3 nodes online', out.getvalue()) - s.run(quiet=True) - - -class test_migrate(AppCase): - - @patch('celery.contrib.migrate.migrate_tasks') - def test_run(self, migrate_tasks): - out = WhateverIO() - m = migrate(app=self.app, stdout=out, stderr=WhateverIO()) - with self.assertRaises(TypeError): - m.run() - self.assertFalse(migrate_tasks.called) - - m.run('memory://foo', 'memory://bar') - self.assertTrue(migrate_tasks.called) - - state = Mock() - state.count = 10 - state.strtotal = 30 - m.on_migrate_task(state, {'task': 'tasks.add', 'id': 'ID'}, None) - self.assertIn('10/30', out.getvalue()) - - -class test_report(AppCase): - - 
def test_run(self): - out = WhateverIO() - r = report(app=self.app, stdout=out) - self.assertEqual(r.run(), EX_OK) - self.assertTrue(out.getvalue()) - - -class test_help(AppCase): - - def test_run(self): - out = WhateverIO() - h = help(app=self.app, stdout=out) - h.parser = Mock() - self.assertEqual(h.run(), EX_USAGE) - self.assertTrue(out.getvalue()) - self.assertTrue(h.usage('help')) - h.parser.print_help.assert_called_with() - - -class test_CeleryCommand(AppCase): - - def test_execute_from_commandline(self): - x = CeleryCommand(app=self.app) - x.handle_argv = Mock() - x.handle_argv.return_value = 1 - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.handle_argv.return_value = True - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.handle_argv.side_effect = KeyboardInterrupt() - with self.assertRaises(SystemExit): - x.execute_from_commandline() - - x.respects_app_option = True - with self.assertRaises(SystemExit): - x.execute_from_commandline(['celery', 'multi']) - self.assertFalse(x.respects_app_option) - x.respects_app_option = True - with self.assertRaises(SystemExit): - x.execute_from_commandline(['manage.py', 'celery', 'multi']) - self.assertFalse(x.respects_app_option) - - def test_with_pool_option(self): - x = CeleryCommand(app=self.app) - self.assertIsNone(x.with_pool_option(['celery', 'events'])) - self.assertTrue(x.with_pool_option(['celery', 'worker'])) - self.assertTrue(x.with_pool_option(['manage.py', 'celery', 'worker'])) - - def test_load_extensions_no_commands(self): - with patch('celery.bin.celery.Extensions') as Ext: - ext = Ext.return_value = Mock(name='Extension') - ext.load.return_value = None - x = CeleryCommand(app=self.app) - x.load_extension_commands() - - def test_determine_exit_status(self): - self.assertEqual(determine_exit_status('true'), EX_OK) - self.assertEqual(determine_exit_status(''), EX_FAILURE) - - def test_relocate_args_from_start(self): - x = CeleryCommand(app=self.app) - self.assertEqual(x._relocate_args_from_start(None), []) - self.assertEqual( - x._relocate_args_from_start( - ['-l', 'debug', 'worker', '-c', '3', '--foo'], - ), - ['worker', '-c', '3', '--foo', '-l', 'debug'], - ) - self.assertEqual( - x._relocate_args_from_start( - ['--pool=gevent', '-l', 'debug', 'worker', '--foo', '-c', '3'], - ), - ['worker', '--foo', '-c', '3', '--pool=gevent', '-l', 'debug'], - ) - self.assertEqual( - x._relocate_args_from_start(['foo', '--foo=1']), - ['foo', '--foo=1'], - ) - - def test_handle_argv(self): - x = CeleryCommand(app=self.app) - x.execute = Mock() - x.handle_argv('celery', []) - x.execute.assert_called_with('help', ['help']) - - x.handle_argv('celery', ['start', 'foo']) - x.execute.assert_called_with('start', ['start', 'foo']) - - def test_execute(self): - x = CeleryCommand(app=self.app) - Help = x.commands['help'] = Mock() - help = Help.return_value = Mock() - x.execute('fooox', ['a']) - help.run_from_argv.assert_called_with(x.prog_name, [], command='help') - help.reset() - x.execute('help', ['help']) - help.run_from_argv.assert_called_with(x.prog_name, [], command='help') - - Dummy = x.commands['dummy'] = Mock() - dummy = Dummy.return_value = Mock() - exc = dummy.run_from_argv.side_effect = Error( - 'foo', status='EX_FAILURE', - ) - x.on_error = Mock(name='on_error') - help.reset() - x.execute('dummy', ['dummy']) - x.on_error.assert_called_with(exc) - dummy.run_from_argv.assert_called_with( - x.prog_name, [], command='dummy', - ) - help.run_from_argv.assert_called_with( - x.prog_name, [], 
command='help', - ) - - exc = dummy.run_from_argv.side_effect = x.UsageError('foo') - x.on_usage_error = Mock() - x.execute('dummy', ['dummy']) - x.on_usage_error.assert_called_with(exc) - - def test_on_usage_error(self): - x = CeleryCommand(app=self.app) - x.error = Mock() - x.on_usage_error(x.UsageError('foo'), command=None) - self.assertTrue(x.error.called) - x.on_usage_error(x.UsageError('foo'), command='dummy') - - def test_prepare_prog_name(self): - x = CeleryCommand(app=self.app) - main = Mock(name='__main__') - main.__file__ = '/opt/foo.py' - with patch.dict(sys.modules, __main__=main): - self.assertEqual(x.prepare_prog_name('__main__.py'), '/opt/foo.py') - self.assertEqual(x.prepare_prog_name('celery'), 'celery') - - -class test_RemoteControl(AppCase): - - def test_call_interface(self): - with self.assertRaises(NotImplementedError): - _RemoteControl(app=self.app).call() - - -class test_inspect(AppCase): - - def test_usage(self): - self.assertTrue(inspect(app=self.app).usage('foo')) - - def test_command_info(self): - i = inspect(app=self.app) - self.assertTrue(i.get_command_info( - 'ping', help=True, color=i.colored.red, - )) - - def test_list_commands_color(self): - i = inspect(app=self.app) - self.assertTrue(i.list_commands( - help=True, color=i.colored.red, - )) - self.assertTrue(i.list_commands( - help=False, color=None, - )) - - def test_epilog(self): - self.assertTrue(inspect(app=self.app).epilog) - - def test_do_call_method_sql_transport_type(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock(name='Connection') - conn.transport.driver_type = 'sql' - i = inspect(app=self.app) - with self.assertRaises(i.Error): - i.do_call_method(['ping']) - - def test_say_directions(self): - i = inspect(self.app) - i.out = Mock() - i.quiet = True - i.say_chat('<-', 'hello out') - self.assertFalse(i.out.called) - - i.say_chat('->', 'hello in') - self.assertTrue(i.out.called) - - i.quiet = False - i.out.reset_mock() - i.say_chat('<-', 'hello out', 'body') - self.assertTrue(i.out.called) - - @patch('celery.app.control.Control.inspect') - def test_run(self, real): - out = WhateverIO() - i = inspect(app=self.app, stdout=out) - with self.assertRaises(Error): - i.run() - with self.assertRaises(Error): - i.run('help') - with self.assertRaises(Error): - i.run('xyzzybaz') - - i.run('ping') - self.assertTrue(real.called) - i.run('ping', destination='foo,bar') - self.assertEqual(real.call_args[1]['destination'], ['foo', 'bar']) - self.assertEqual(real.call_args[1]['timeout'], 0.2) - callback = real.call_args[1]['callback'] - - callback({'foo': {'ok': 'pong'}}) - self.assertIn('OK', out.getvalue()) - - instance = real.return_value = Mock() - instance.ping.return_value = None - with self.assertRaises(Error): - i.run('ping') - - out.seek(0) - out.truncate() - i.quiet = True - i.say_chat('<-', 'hello') - self.assertFalse(out.getvalue()) - - -class test_control(AppCase): - - def control(self, patch_call, *args, **kwargs): - kwargs.setdefault('app', Mock(name='app')) - c = control(*args, **kwargs) - if patch_call: - c.call = Mock(name='control.call') - return c - - def test_call(self): - i = self.control(False) - i.call('foo', 1, kw=2) - i.app.control.foo.assert_called_with(1, kw=2, reply=True) - - def test_pool_grow(self): - i = self.control(True) - i.pool_grow('pool_grow', n=2) - i.call.assert_called_with('pool_grow', 2) - - def test_pool_shrink(self): - i = self.control(True) - i.pool_shrink('pool_shrink', n=2) - i.call.assert_called_with('pool_shrink', 2) - - def 
test_autoscale(self): - i = self.control(True) - i.autoscale('autoscale', max=3, min=2) - i.call.assert_called_with('autoscale', 3, 2) - - def test_rate_limit(self): - i = self.control(True) - i.rate_limit('rate_limit', 'proj.add', '1/s') - i.call.assert_called_with('rate_limit', 'proj.add', '1/s') - - def test_time_limit(self): - i = self.control(True) - i.time_limit('time_limit', 'proj.add', 10, 30) - i.call.assert_called_with('time_limit', 'proj.add', 10, 30) - - def test_add_consumer(self): - i = self.control(True) - i.add_consumer( - 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', - durable=True, - ) - i.call.assert_called_with( - 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', - durable=True, - ) - - def test_cancel_consumer(self): - i = self.control(True) - i.cancel_consumer('cancel_consumer', 'queue') - i.call.assert_called_with('cancel_consumer', 'queue') - - -class test_multi(AppCase): - - def test_get_options(self): - self.assertTupleEqual(multi(app=self.app).get_options(), ()) - - def test_run_from_argv(self): - with patch('celery.bin.multi.MultiTool') as MultiTool: - m = MultiTool.return_value = Mock() - multi(self.app).run_from_argv('celery', ['arg'], command='multi') - m.execute_from_commandline.assert_called_with( - ['multi', 'arg'], 'celery', - ) - - -class test_main(AppCase): - - @patch('celery.bin.celery.CeleryCommand') - def test_main(self, Command): - cmd = Command.return_value = Mock() - mainfun() - cmd.execute_from_commandline.assert_called_with(None) - - @patch('celery.bin.celery.CeleryCommand') - def test_main_KeyboardInterrupt(self, Command): - cmd = Command.return_value = Mock() - cmd.execute_from_commandline.side_effect = KeyboardInterrupt() - mainfun() - cmd.execute_from_commandline.assert_called_with(None) - - -class test_compat(AppCase): - - def test_compat_command_decorator(self): - with patch('celery.bin.celery.CeleryCommand') as CC: - self.assertEqual(command(), CC.register_command) - fun = Mock(name='fun') - command(fun) - CC.register_command.assert_called_with(fun) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py deleted file mode 100644 index 0fa3934..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import absolute_import - -from celery.platforms import IS_WINDOWS -from celery.bin.celeryd_detach import ( - detach, - detached_celeryd, - main, -) - -from celery.tests.case import AppCase, Mock, override_stdouts, patch - - -if not IS_WINDOWS: - class test_detached(AppCase): - - @patch('celery.bin.celeryd_detach.detached') - @patch('os.execv') - @patch('celery.bin.celeryd_detach.logger') - @patch('celery.app.log.Logging.setup_logging_subsystem') - def test_execs(self, setup_logs, logger, execv, detached): - context = detached.return_value = Mock() - context.__enter__ = Mock() - context.__exit__ = Mock() - - detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log', - pidfile='/var/pid', hostname='foo@example.com') - detached.assert_called_with( - '/var/log', '/var/pid', None, None, None, None, False, - after_forkers=False, - ) - execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c']) - - execv.side_effect = Exception('foo') - r = detach( - '/bin/boo', ['a', 'b', 'c'], - logfile='/var/log', pidfile='/var/pid', - hostname='foo@example.com', app=self.app) - context.__enter__.assert_called_with() - 
self.assertTrue(logger.critical.called) - setup_logs.assert_called_with( - 'ERROR', '/var/log', hostname='foo@example.com') - self.assertEqual(r, 1) - - -class test_PartialOptionParser(AppCase): - - def test_parser(self): - x = detached_celeryd(self.app) - p = x.Parser('celeryd_detach') - options, values = p.parse_args(['--logfile=foo', '--fake', '--enable', - 'a', 'b', '-c1', '-d', '2']) - self.assertEqual(options.logfile, 'foo') - self.assertEqual(values, ['a', 'b']) - self.assertEqual(p.leftovers, ['--enable', '-c1', '-d', '2']) - - with override_stdouts(): - with self.assertRaises(SystemExit): - p.parse_args(['--logfile']) - p.get_option('--logfile').nargs = 2 - with self.assertRaises(SystemExit): - p.parse_args(['--logfile=a']) - with self.assertRaises(SystemExit): - p.parse_args(['--fake=abc']) - - assert p.get_option('--logfile').nargs == 2 - p.parse_args(['--logfile=a', 'b']) - p.get_option('--logfile').nargs = 1 - - -class test_Command(AppCase): - argv = ['--autoscale=10,2', '-c', '1', - '--logfile=/var/log', '-lDEBUG', - '--', '.disable_rate_limits=1'] - - def test_parse_options(self): - x = detached_celeryd(app=self.app) - o, v, l = x.parse_options('cd', self.argv) - self.assertEqual(o.logfile, '/var/log') - self.assertEqual(l, ['--autoscale=10,2', '-c', '1', - '-lDEBUG', '--logfile=/var/log', - '--pidfile=celeryd.pid']) - x.parse_options('cd', []) # no args - - @patch('sys.exit') - @patch('celery.bin.celeryd_detach.detach') - def test_execute_from_commandline(self, detach, exit): - x = detached_celeryd(app=self.app) - x.execute_from_commandline(self.argv) - self.assertTrue(exit.called) - detach.assert_called_with( - path=x.execv_path, uid=None, gid=None, - umask=None, fake=False, logfile='/var/log', pidfile='celeryd.pid', - working_directory=None, executable=None, hostname=None, - argv=x.execv_argv + [ - '-c', '1', '-lDEBUG', - '--logfile=/var/log', '--pidfile=celeryd.pid', - '--', '.disable_rate_limits=1' - ], - app=self.app, - ) - - @patch('celery.bin.celeryd_detach.detached_celeryd') - def test_main(self, command): - c = command.return_value = Mock() - main(self.app) - c.execute_from_commandline.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py deleted file mode 100644 index 09cdc4d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import - -from time import time - -from celery.events.dumper import ( - humanize_type, - Dumper, - evdump, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, patch - - -class test_Dumper(AppCase): - - def setup(self): - self.out = WhateverIO() - self.dumper = Dumper(out=self.out) - - def test_humanize_type(self): - self.assertEqual(humanize_type('worker-offline'), 'shutdown') - self.assertEqual(humanize_type('task-started'), 'task started') - - def test_format_task_event(self): - self.dumper.format_task_event( - 'worker@example.com', time(), 'task-started', 'tasks.add', {}) - self.assertTrue(self.out.getvalue()) - - def test_on_event(self): - event = { - 'hostname': 'worker@example.com', - 'timestamp': time(), - 'uuid': '1ef', - 'name': 'tasks.add', - 'args': '(2, 2)', - 'kwargs': '{}', - } - self.dumper.on_event(dict(event, type='task-received')) - self.assertTrue(self.out.getvalue()) - self.dumper.on_event(dict(event, type='task-revoked')) - self.dumper.on_event(dict(event, 
type='worker-online')) - - @patch('celery.events.EventReceiver.capture') - def test_evdump(self, capture): - capture.side_effect = KeyboardInterrupt() - evdump(app=self.app) - - def test_evdump_error_handler(self): - app = Mock(name='app') - with patch('celery.events.dumper.Dumper') as Dumper: - Dumper.return_value = Mock(name='dumper') - recv = app.events.Receiver.return_value = Mock() - - def se(*_a, **_k): - recv.capture.side_effect = SystemExit() - raise KeyError() - recv.capture.side_effect = se - - Conn = app.connection.return_value = Mock(name='conn') - conn = Conn.clone.return_value = Mock(name='cloned_conn') - conn.connection_errors = (KeyError, ) - conn.channel_errors = () - - evdump(app) - self.assertTrue(conn.ensure_connection.called) - errback = conn.ensure_connection.call_args[0][0] - errback(KeyError(), 1) - self.assertTrue(conn.as_uri.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py deleted file mode 100644 index a6e79f7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -from celery.bin import events - -from celery.tests.case import AppCase, SkipTest, patch, _old_patch - - -class MockCommand(object): - executed = [] - - def execute_from_commandline(self, **kwargs): - self.executed.append(True) - - -def proctitle(prog, info=None): - proctitle.last = (prog, info) -proctitle.last = () - - -class test_events(AppCase): - - def setup(self): - self.ev = events.events(app=self.app) - - @_old_patch('celery.events.dumper', 'evdump', - lambda **kw: 'me dumper, you?') - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def test_run_dump(self): - self.assertEqual(self.ev.run(dump=True), 'me dumper, you?') - self.assertIn('celery events:dump', proctitle.last[0]) - - def test_run_top(self): - try: - import curses # noqa - except ImportError: - raise SkipTest('curses monitor requires curses') - - @_old_patch('celery.events.cursesmon', 'evtop', - lambda **kw: 'me top, you?') - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def _inner(): - self.assertEqual(self.ev.run(), 'me top, you?') - self.assertIn('celery events:top', proctitle.last[0]) - return _inner() - - @_old_patch('celery.events.snapshot', 'evcam', - lambda *a, **k: (a, k)) - @_old_patch('celery.bin.events', 'set_process_title', proctitle) - def test_run_cam(self): - a, kw = self.ev.run(camera='foo.bar.baz', logfile='logfile') - self.assertEqual(a[0], 'foo.bar.baz') - self.assertEqual(kw['freq'], 1.0) - self.assertIsNone(kw['maxrate']) - self.assertEqual(kw['loglevel'], 'INFO') - self.assertEqual(kw['logfile'], 'logfile') - self.assertIn('celery events:cam', proctitle.last[0]) - - @patch('celery.events.snapshot.evcam') - @patch('celery.bin.events.detached') - def test_run_cam_detached(self, detached, evcam): - self.ev.prog_name = 'celery events' - self.ev.run_evcam('myapp.Camera', detach=True) - self.assertTrue(detached.called) - self.assertTrue(evcam.called) - - def test_get_options(self): - self.assertTrue(self.ev.get_options()) - - @_old_patch('celery.bin.events', 'events', MockCommand) - def test_main(self): - MockCommand.executed = [] - events.main() - self.assertTrue(MockCommand.executed) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py deleted file mode 100644 index 
ee77a45..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py +++ /dev/null @@ -1,474 +0,0 @@ -from __future__ import absolute_import - -import errno -import signal -import sys - -from celery.bin.multi import ( - main, - MultiTool, - findsig, - abbreviations, - parse_ns_range, - format_opt, - quote, - NamespacedOptionParser, - multi_args, - __doc__ as doc, -) - -from celery.tests.case import AppCase, Mock, WhateverIO, SkipTest, patch - - -class test_functions(AppCase): - - def test_findsig(self): - self.assertEqual(findsig(['a', 'b', 'c', '-1']), 1) - self.assertEqual(findsig(['--foo=1', '-9']), 9) - self.assertEqual(findsig(['-INT']), signal.SIGINT) - self.assertEqual(findsig([]), signal.SIGTERM) - self.assertEqual(findsig(['-s']), signal.SIGTERM) - self.assertEqual(findsig(['-log']), signal.SIGTERM) - - def test_abbreviations(self): - expander = abbreviations({'%s': 'START', - '%x': 'STOP'}) - self.assertEqual(expander('foo%s'), 'fooSTART') - self.assertEqual(expander('foo%x'), 'fooSTOP') - self.assertEqual(expander('foo%y'), 'foo%y') - self.assertIsNone(expander(None)) - - def test_parse_ns_range(self): - self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3']) - self.assertEqual(parse_ns_range('1-3', False), ['1-3']) - self.assertEqual(parse_ns_range( - '1-3,10,11,20', True), - ['1', '2', '3', '10', '11', '20'], - ) - - def test_format_opt(self): - self.assertEqual(format_opt('--foo', None), '--foo') - self.assertEqual(format_opt('-c', 1), '-c 1') - self.assertEqual(format_opt('--log', 'foo'), '--log=foo') - - def test_quote(self): - self.assertEqual(quote("the 'quick"), "'the '\\''quick'") - - -class test_NamespacedOptionParser(AppCase): - - def test_parse(self): - x = NamespacedOptionParser(['-c:1,3', '4']) - self.assertEqual(x.namespaces.get('1,3'), {'-c': '4'}) - x = NamespacedOptionParser(['-c:jerry,elaine', '5', - '--loglevel:kramer=DEBUG', - '--flag', - '--logfile=foo', '-Q', 'bar', 'a', 'b', - '--', '.disable_rate_limits=1']) - self.assertEqual(x.options, {'--logfile': 'foo', - '-Q': 'bar', - '--flag': None}) - self.assertEqual(x.values, ['a', 'b']) - self.assertEqual(x.namespaces.get('jerry,elaine'), {'-c': '5'}) - self.assertEqual(x.namespaces.get('kramer'), {'--loglevel': 'DEBUG'}) - self.assertEqual(x.passthrough, '-- .disable_rate_limits=1') - - -class test_multi_args(AppCase): - - @patch('socket.gethostname') - def test_parse(self, gethostname): - p = NamespacedOptionParser([ - '-c:jerry,elaine', '5', - '--loglevel:kramer=DEBUG', - '--flag', - '--logfile=foo', '-Q', 'bar', 'jerry', - 'elaine', 'kramer', - '--', '.disable_rate_limits=1', - ]) - it = multi_args(p, cmd='COMMAND', append='*AP*', - prefix='*P*', suffix='*S*') - names = list(it) - - def assert_line_in(name, args): - self.assertIn(name, [tup[0] for tup in names]) - argv = None - for item in names: - if item[0] == name: - argv = item[1] - self.assertTrue(argv) - for arg in args: - self.assertIn(arg, argv) - - assert_line_in( - '*P*jerry@*S*', - ['COMMAND', '-n *P*jerry@*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*'], - ) - assert_line_in( - '*P*elaine@*S*', - ['COMMAND', '-n *P*elaine@*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*'], - ) - assert_line_in( - '*P*kramer@*S*', - ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*', - '-Q bar', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*'], - ) - expand = names[0][2] - self.assertEqual(expand('%h'), '*P*jerry@*S*') - 
self.assertEqual(expand('%n'), 'jerry') - names2 = list(multi_args(p, cmd='COMMAND', append='', - prefix='*P*', suffix='*S*')) - self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1') - - gethostname.return_value = 'example.com' - p2 = NamespacedOptionParser(['10', '-c:1', '5']) - names3 = list(multi_args(p2, cmd='COMMAND')) - self.assertEqual(len(names3), 10) - self.assertEqual( - names3[0][0:2], - ('celery1@example.com', - ['COMMAND', '-n celery1@example.com', '-c 5', '']), - ) - for i, worker in enumerate(names3[1:]): - self.assertEqual( - worker[0:2], - ('celery%s@example.com' % (i + 2), - ['COMMAND', '-n celery%s@example.com' % (i + 2), '']), - ) - - names4 = list(multi_args(p2, cmd='COMMAND', suffix='""')) - self.assertEqual(len(names4), 10) - self.assertEqual( - names4[0][0:2], - ('celery1@', - ['COMMAND', '-n celery1@', '-c 5', '']), - ) - - p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) - names5 = list(multi_args(p3, cmd='COMMAND', suffix='""')) - self.assertEqual( - names5[0][0:2], - ('foo@', - ['COMMAND', '-n foo@', '-c 5', '']), - ) - - -class test_MultiTool(AppCase): - - def setup(self): - self.fh = WhateverIO() - self.env = {} - self.t = MultiTool(env=self.env, fh=self.fh) - - def test_note(self): - self.t.note('hello world') - self.assertEqual(self.fh.getvalue(), 'hello world\n') - - def test_note_quiet(self): - self.t.quiet = True - self.t.note('hello world') - self.assertFalse(self.fh.getvalue()) - - def test_info(self): - self.t.verbose = True - self.t.info('hello info') - self.assertEqual(self.fh.getvalue(), 'hello info\n') - - def test_info_not_verbose(self): - self.t.verbose = False - self.t.info('hello info') - self.assertFalse(self.fh.getvalue()) - - def test_error(self): - self.t.carp = Mock() - self.t.usage = Mock() - self.assertEqual(self.t.error('foo'), 1) - self.t.carp.assert_called_with('foo') - self.t.usage.assert_called_with() - - self.t.carp = Mock() - self.assertEqual(self.t.error(), 1) - self.assertFalse(self.t.carp.called) - - self.assertEqual(self.t.retcode, 1) - - @patch('celery.bin.multi.Popen') - def test_waitexec(self, Popen): - self.t.note = Mock() - pipe = Popen.return_value = Mock() - pipe.wait.return_value = -10 - self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 10) - Popen.assert_called_with(['path', '-m', 'foo'], env=self.t.env) - self.t.note.assert_called_with('* Child was terminated by signal 10') - - pipe.wait.return_value = 2 - self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 2) - self.t.note.assert_called_with( - '* Child terminated with errorcode 2', - ) - - pipe.wait.return_value = 0 - self.assertFalse(self.t.waitexec(['-m', 'foo', 'path'])) - - def test_nosplash(self): - self.t.nosplash = True - self.t.splash() - self.assertFalse(self.fh.getvalue()) - - def test_splash(self): - self.t.nosplash = False - self.t.splash() - self.assertIn('celery multi', self.fh.getvalue()) - - def test_usage(self): - self.t.usage() - self.assertTrue(self.fh.getvalue()) - - def test_help(self): - self.t.help([]) - self.assertIn(doc, self.fh.getvalue()) - - def test_expand(self): - self.t.expand(['foo%n', 'ask', 'klask', 'dask']) - self.assertEqual( - self.fh.getvalue(), 'fooask\nfooklask\nfoodask\n', - ) - - def test_restart(self): - stop = self.t._stop_nodes = Mock() - self.t.restart(['jerry', 'george'], 'celery worker') - waitexec = self.t.waitexec = Mock() - self.assertTrue(stop.called) - callback = stop.call_args[1]['callback'] - self.assertTrue(callback) - - waitexec.return_value = 0 - callback('jerry', ['arg'], 13) - 
waitexec.assert_called_with(['arg'], path=sys.executable) - self.assertIn('OK', self.fh.getvalue()) - self.fh.seek(0) - self.fh.truncate() - - waitexec.return_value = 1 - callback('jerry', ['arg'], 13) - self.assertIn('FAILED', self.fh.getvalue()) - - def test_stop(self): - self.t.getpids = Mock() - self.t.getpids.return_value = [2, 3, 4] - self.t.shutdown_nodes = Mock() - self.t.stop(['a', 'b', '-INT'], 'celery worker') - self.t.shutdown_nodes.assert_called_with( - [2, 3, 4], sig=signal.SIGINT, retry=None, callback=None, - - ) - - def test_kill(self): - if not hasattr(signal, 'SIGKILL'): - raise SkipTest('SIGKILL not supported by this platform') - self.t.getpids = Mock() - self.t.getpids.return_value = [ - ('a', None, 10), - ('b', None, 11), - ('c', None, 12) - ] - sig = self.t.signal_node = Mock() - - self.t.kill(['a', 'b', 'c'], 'celery worker') - - sigs = sig.call_args_list - self.assertEqual(len(sigs), 3) - self.assertEqual(sigs[0][0], ('a', 10, signal.SIGKILL)) - self.assertEqual(sigs[1][0], ('b', 11, signal.SIGKILL)) - self.assertEqual(sigs[2][0], ('c', 12, signal.SIGKILL)) - - def prepare_pidfile_for_getpids(self, Pidfile): - class pids(object): - - def __init__(self, path): - self.path = path - - def read_pid(self): - try: - return {'foo.pid': 10, - 'bar.pid': 11}[self.path] - except KeyError: - raise ValueError() - Pidfile.side_effect = pids - - @patch('celery.bin.multi.Pidfile') - @patch('socket.gethostname') - def test_getpids(self, gethostname, Pidfile): - gethostname.return_value = 'e.com' - self.prepare_pidfile_for_getpids(Pidfile) - callback = Mock() - - p = NamespacedOptionParser(['foo', 'bar', 'baz']) - nodes = self.t.getpids(p, 'celery worker', callback=callback) - node_0, node_1 = nodes - self.assertEqual(node_0[0], 'foo@e.com') - self.assertEqual( - sorted(node_0[1]), - sorted(('celery worker', '--pidfile=foo.pid', - '-n foo@e.com', '')), - ) - self.assertEqual(node_0[2], 10) - - self.assertEqual(node_1[0], 'bar@e.com') - self.assertEqual( - sorted(node_1[1]), - sorted(('celery worker', '--pidfile=bar.pid', - '-n bar@e.com', '')), - ) - self.assertEqual(node_1[2], 11) - self.assertTrue(callback.called) - cargs, _ = callback.call_args - self.assertEqual(cargs[0], 'baz@e.com') - self.assertItemsEqual( - cargs[1], - ['celery worker', '--pidfile=baz.pid', '-n baz@e.com', ''], - ) - self.assertIsNone(cargs[2]) - self.assertIn('DOWN', self.fh.getvalue()) - - # without callback, should work - nodes = self.t.getpids(p, 'celery worker', callback=None) - - @patch('celery.bin.multi.Pidfile') - @patch('socket.gethostname') - @patch('celery.bin.multi.sleep') - def test_shutdown_nodes(self, sleep, gethostname, Pidfile): - gethostname.return_value = 'e.com' - self.prepare_pidfile_for_getpids(Pidfile) - self.assertIsNone(self.t.shutdown_nodes([])) - self.t.signal_node = Mock() - node_alive = self.t.node_alive = Mock() - self.t.node_alive.return_value = False - - callback = Mock() - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=callback) - sigs = sorted(self.t.signal_node.call_args_list) - self.assertEqual(len(sigs), 2) - self.assertIn( - ('foo@e.com', 10, signal.SIGTERM), - [tup[0] for tup in sigs], - ) - self.assertIn( - ('bar@e.com', 11, signal.SIGTERM), - [tup[0] for tup in sigs], - ) - self.t.signal_node.return_value = False - self.assertTrue(callback.called) - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=None) - - def on_node_alive(pid): - if node_alive.call_count > 4: - return True - return False - self.t.signal_node.return_value = True - 
self.t.node_alive.side_effect = on_node_alive - self.t.stop(['foo', 'bar', 'baz'], 'celery worker', retry=True) - - @patch('os.kill') - def test_node_alive(self, kill): - kill.return_value = True - self.assertTrue(self.t.node_alive(13)) - esrch = OSError() - esrch.errno = errno.ESRCH - kill.side_effect = esrch - self.assertFalse(self.t.node_alive(13)) - kill.assert_called_with(13, 0) - - enoent = OSError() - enoent.errno = errno.ENOENT - kill.side_effect = enoent - with self.assertRaises(OSError): - self.t.node_alive(13) - - @patch('os.kill') - def test_signal_node(self, kill): - kill.return_value = True - self.assertTrue(self.t.signal_node('foo', 13, 9)) - esrch = OSError() - esrch.errno = errno.ESRCH - kill.side_effect = esrch - self.assertFalse(self.t.signal_node('foo', 13, 9)) - kill.assert_called_with(13, 9) - self.assertIn('Could not signal foo', self.fh.getvalue()) - - enoent = OSError() - enoent.errno = errno.ENOENT - kill.side_effect = enoent - with self.assertRaises(OSError): - self.t.signal_node('foo', 13, 9) - - def test_start(self): - self.t.waitexec = Mock() - self.t.waitexec.return_value = 0 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) - - self.t.waitexec.return_value = 1 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) - - def test_show(self): - self.t.show(['foo', 'bar', 'baz'], 'celery worker') - self.assertTrue(self.fh.getvalue()) - - @patch('socket.gethostname') - def test_get(self, gethostname): - gethostname.return_value = 'e.com' - self.t.get(['xuzzy@e.com', 'foo', 'bar', 'baz'], 'celery worker') - self.assertFalse(self.fh.getvalue()) - self.t.get(['foo@e.com', 'foo', 'bar', 'baz'], 'celery worker') - self.assertTrue(self.fh.getvalue()) - - @patch('socket.gethostname') - def test_names(self, gethostname): - gethostname.return_value = 'e.com' - self.t.names(['foo', 'bar', 'baz'], 'celery worker') - self.assertIn('foo@e.com\nbar@e.com\nbaz@e.com', self.fh.getvalue()) - - def test_execute_from_commandline(self): - start = self.t.commands['start'] = Mock() - self.t.error = Mock() - self.t.execute_from_commandline(['multi', 'start', 'foo', 'bar']) - self.assertFalse(self.t.error.called) - start.assert_called_with(['foo', 'bar'], 'celery worker') - - self.t.error = Mock() - self.t.execute_from_commandline(['multi', 'frob', 'foo', 'bar']) - self.t.error.assert_called_with('Invalid command: frob') - - self.t.error = Mock() - self.t.execute_from_commandline(['multi']) - self.t.error.assert_called_with() - - self.t.error = Mock() - self.t.execute_from_commandline(['multi', '-foo']) - self.t.error.assert_called_with() - - self.t.execute_from_commandline( - ['multi', 'start', 'foo', - '--nosplash', '--quiet', '-q', '--verbose', '--no-color'], - ) - self.assertTrue(self.t.nosplash) - self.assertTrue(self.t.quiet) - self.assertTrue(self.t.verbose) - self.assertTrue(self.t.no_color) - - def test_stopwait(self): - self.t._stop_nodes = Mock() - self.t.stopwait(['foo', 'bar', 'baz'], 'celery worker') - self.assertEqual(self.t._stop_nodes.call_args[1]['retry'], 2) - - @patch('celery.bin.multi.MultiTool') - def test_main(self, MultiTool): - m = MultiTool.return_value = Mock() - with self.assertRaises(SystemExit): - main() - m.execute_from_commandline.assert_called_with(sys.argv) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py deleted file mode 100644 index bc63940..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py +++ /dev/null @@ -1,681 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import sys - -from billiard import current_process -from kombu import Exchange, Queue - -from celery import platforms -from celery import signals -from celery.app import trace -from celery.apps import worker as cd -from celery.bin.worker import worker, main as worker_main -from celery.exceptions import ( - ImproperlyConfigured, WorkerShutdown, WorkerTerminate, -) -from celery.utils.log import ensure_process_aware_logger -from celery.worker import state - -from celery.tests.case import ( - AppCase, - Mock, - SkipTest, - disable_stdouts, - patch, - skip_if_pypy, - skip_if_jython, -) - -ensure_process_aware_logger() - - -class WorkerAppCase(AppCase): - - def tearDown(self): - super(WorkerAppCase, self).tearDown() - trace.reset_worker_optimizations() - - -class Worker(cd.Worker): - redirect_stdouts = False - - def start(self, *args, **kwargs): - self.on_start() - - -class test_Worker(WorkerAppCase): - Worker = Worker - - @disable_stdouts - def test_queues_string(self): - w = self.app.Worker() - w.setup_queues('foo,bar,baz') - self.assertTrue('foo' in self.app.amqp.queues) - - @disable_stdouts - def test_cpu_count(self): - with patch('celery.worker.cpu_count') as cpu_count: - cpu_count.side_effect = NotImplementedError() - w = self.app.Worker(concurrency=None) - self.assertEqual(w.concurrency, 2) - w = self.app.Worker(concurrency=5) - self.assertEqual(w.concurrency, 5) - - @disable_stdouts - def test_windows_B_option(self): - self.app.IS_WINDOWS = True - with self.assertRaises(SystemExit): - worker(app=self.app).run(beat=True) - - def test_setup_concurrency_very_early(self): - x = worker() - x.run = Mock() - with self.assertRaises(ImportError): - x.execute_from_commandline(['worker', '-P', 'xyzybox']) - - def test_run_from_argv_basic(self): - x = worker(app=self.app) - x.run = Mock() - x.maybe_detach = Mock() - - def run(*args, **kwargs): - pass - x.run = run - x.run_from_argv('celery', []) - self.assertTrue(x.maybe_detach.called) - - def test_maybe_detach(self): - x = worker(app=self.app) - with patch('celery.bin.worker.detached_celeryd') as detached: - x.maybe_detach([]) - self.assertFalse(detached.called) - with self.assertRaises(SystemExit): - x.maybe_detach(['--detach']) - self.assertTrue(detached.called) - - @disable_stdouts - def test_invalid_loglevel_gives_error(self): - x = worker(app=self.app) - with self.assertRaises(SystemExit): - x.run(loglevel='GRIM_REAPER') - - def test_no_loglevel(self): - self.app.Worker = Mock() - worker(app=self.app).run(loglevel=None) - - def test_tasklist(self): - worker = self.app.Worker() - self.assertTrue(worker.app.tasks) - self.assertTrue(worker.app.finalized) - self.assertTrue(worker.tasklist(include_builtins=True)) - worker.tasklist(include_builtins=False) - - def test_extra_info(self): - worker = self.app.Worker() - worker.loglevel = logging.WARNING - self.assertFalse(worker.extra_info()) - worker.loglevel = logging.INFO - self.assertTrue(worker.extra_info()) - - @disable_stdouts - def test_loglevel_string(self): - worker = self.Worker(app=self.app, loglevel='INFO') - self.assertEqual(worker.loglevel, logging.INFO) - - @disable_stdouts - def test_run_worker(self): - handlers = {} - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): - handlers[sig] = handler - - p = platforms.signals - platforms.signals = Signals() - try: - w = self.Worker(app=self.app) 
- w._isatty = False - w.on_start() - for sig in 'SIGINT', 'SIGHUP', 'SIGTERM': - self.assertIn(sig, handlers) - - handlers.clear() - w = self.Worker(app=self.app) - w._isatty = True - w.on_start() - for sig in 'SIGINT', 'SIGTERM': - self.assertIn(sig, handlers) - self.assertNotIn('SIGHUP', handlers) - finally: - platforms.signals = p - - @disable_stdouts - def test_startup_info(self): - worker = self.Worker(app=self.app) - worker.on_start() - self.assertTrue(worker.startup_info()) - worker.loglevel = logging.DEBUG - self.assertTrue(worker.startup_info()) - worker.loglevel = logging.INFO - self.assertTrue(worker.startup_info()) - worker.autoscale = 13, 10 - self.assertTrue(worker.startup_info()) - - prev_loader = self.app.loader - worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi') - self.app.loader = Mock() - self.app.loader.__module__ = 'acme.baked_beans' - self.assertTrue(worker.startup_info()) - - self.app.loader = Mock() - self.app.loader.__module__ = 'celery.loaders.foo' - self.assertTrue(worker.startup_info()) - - from celery.loaders.app import AppLoader - self.app.loader = AppLoader(app=self.app) - self.assertTrue(worker.startup_info()) - - self.app.loader = prev_loader - worker.send_events = True - self.assertTrue(worker.startup_info()) - - # test when there are too few output lines - # to draft the ascii art onto - prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox'] - try: - self.assertTrue(worker.startup_info()) - finally: - cd.ARTLINES = prev - - @disable_stdouts - def test_run(self): - self.Worker(app=self.app).on_start() - self.Worker(app=self.app, purge=True).on_start() - worker = self.Worker(app=self.app) - worker.on_start() - - @disable_stdouts - def test_purge_messages(self): - self.Worker(app=self.app).purge_messages() - - @disable_stdouts - def test_init_queues(self): - app = self.app - c = app.conf - app.amqp.queues = app.amqp.Queues({ - 'celery': {'exchange': 'celery', - 'routing_key': 'celery'}, - 'video': {'exchange': 'video', - 'routing_key': 'video'}, - }) - worker = self.Worker(app=self.app) - worker.setup_queues(['video']) - self.assertIn('video', app.amqp.queues) - self.assertIn('video', app.amqp.queues.consume_from) - self.assertIn('celery', app.amqp.queues) - self.assertNotIn('celery', app.amqp.queues.consume_from) - - c.CELERY_CREATE_MISSING_QUEUES = False - del(app.amqp.queues) - with self.assertRaises(ImproperlyConfigured): - self.Worker(app=self.app).setup_queues(['image']) - del(app.amqp.queues) - c.CELERY_CREATE_MISSING_QUEUES = True - worker = self.Worker(app=self.app) - worker.setup_queues(['image']) - self.assertIn('image', app.amqp.queues.consume_from) - self.assertEqual( - Queue('image', Exchange('image'), routing_key='image'), - app.amqp.queues['image'], - ) - - @disable_stdouts - def test_autoscale_argument(self): - worker1 = self.Worker(app=self.app, autoscale='10,3') - self.assertListEqual(worker1.autoscale, [10, 3]) - worker2 = self.Worker(app=self.app, autoscale='10') - self.assertListEqual(worker2.autoscale, [10, 0]) - self.assert_no_logging_side_effect() - - def test_include_argument(self): - worker1 = self.Worker(app=self.app, include='os') - self.assertListEqual(worker1.include, ['os']) - worker2 = self.Worker(app=self.app, - include='os,sys') - self.assertListEqual(worker2.include, ['os', 'sys']) - self.Worker(app=self.app, include=['os', 'sys']) - - @disable_stdouts - def test_unknown_loglevel(self): - with self.assertRaises(SystemExit): - worker(app=self.app).run(loglevel='ALIEN') - worker1 = 
self.Worker(app=self.app, loglevel=0xFFFF) - self.assertEqual(worker1.loglevel, 0xFFFF) - - @disable_stdouts - @patch('os._exit') - def test_warns_if_running_as_privileged_user(self, _exit): - app = self.app - if app.IS_WINDOWS: - raise SkipTest('Not applicable on Windows') - - with patch('os.getuid') as getuid: - getuid.return_value = 0 - self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle'] - worker = self.Worker(app=self.app) - worker.on_start() - _exit.assert_called_with(1) - from celery import platforms - platforms.C_FORCE_ROOT = True - try: - with self.assertWarnsRegex( - RuntimeWarning, - r'absolutely not recommended'): - worker = self.Worker(app=self.app) - worker.on_start() - finally: - platforms.C_FORCE_ROOT = False - self.app.conf.CELERY_ACCEPT_CONTENT = ['json'] - with self.assertWarnsRegex( - RuntimeWarning, - r'absolutely not recommended'): - worker = self.Worker(app=self.app) - worker.on_start() - - @disable_stdouts - def test_redirect_stdouts(self): - self.Worker(app=self.app, redirect_stdouts=False) - with self.assertRaises(AttributeError): - sys.stdout.logger - - @disable_stdouts - def test_on_start_custom_logging(self): - self.app.log.redirect_stdouts = Mock() - worker = self.Worker(app=self.app, redirect_stdouts=True) - worker._custom_logging = True - worker.on_start() - self.assertFalse(self.app.log.redirect_stdouts.called) - - def test_setup_logging_no_color(self): - worker = self.Worker( - app=self.app, redirect_stdouts=False, no_color=True, - ) - prev, self.app.log.setup = self.app.log.setup, Mock() - try: - worker.setup_logging() - self.assertFalse(self.app.log.setup.call_args[1]['colorize']) - finally: - self.app.log.setup = prev - - @disable_stdouts - def test_startup_info_pool_is_str(self): - worker = self.Worker(app=self.app, redirect_stdouts=False) - worker.pool_cls = 'foo' - worker.startup_info() - - def test_redirect_stdouts_already_handled(self): - logging_setup = [False] - - @signals.setup_logging.connect - def on_logging_setup(**kwargs): - logging_setup[0] = True - - try: - worker = self.Worker(app=self.app, redirect_stdouts=False) - worker.app.log.already_setup = False - worker.setup_logging() - self.assertTrue(logging_setup[0]) - with self.assertRaises(AttributeError): - sys.stdout.logger - finally: - signals.setup_logging.disconnect(on_logging_setup) - - @disable_stdouts - def test_platform_tweaks_osx(self): - - class OSXWorker(Worker): - proxy_workaround_installed = False - - def osx_proxy_detection_workaround(self): - self.proxy_workaround_installed = True - - worker = OSXWorker(app=self.app, redirect_stdouts=False) - - def install_HUP_nosupport(controller): - controller.hup_not_supported_installed = True - - class Controller(object): - pass - - prev = cd.install_HUP_not_supported_handler - cd.install_HUP_not_supported_handler = install_HUP_nosupport - try: - worker.app.IS_OSX = True - controller = Controller() - worker.install_platform_tweaks(controller) - self.assertTrue(controller.hup_not_supported_installed) - self.assertTrue(worker.proxy_workaround_installed) - finally: - cd.install_HUP_not_supported_handler = prev - - @disable_stdouts - def test_general_platform_tweaks(self): - - restart_worker_handler_installed = [False] - - def install_worker_restart_handler(worker): - restart_worker_handler_installed[0] = True - - class Controller(object): - pass - - prev = cd.install_worker_restart_handler - cd.install_worker_restart_handler = install_worker_restart_handler - try: - worker = self.Worker(app=self.app) - worker.app.IS_OSX = False - 
worker.install_platform_tweaks(Controller()) - self.assertTrue(restart_worker_handler_installed[0]) - finally: - cd.install_worker_restart_handler = prev - - @disable_stdouts - def test_on_consumer_ready(self): - worker_ready_sent = [False] - - @signals.worker_ready.connect - def on_worker_ready(**kwargs): - worker_ready_sent[0] = True - - self.Worker(app=self.app).on_consumer_ready(object()) - self.assertTrue(worker_ready_sent[0]) - - -class test_funs(WorkerAppCase): - - def test_active_thread_count(self): - self.assertTrue(cd.active_thread_count()) - - @disable_stdouts - def test_set_process_status(self): - try: - __import__('setproctitle') - except ImportError: - raise SkipTest('setproctitle not installed') - worker = Worker(app=self.app, hostname='xyzza') - prev1, sys.argv = sys.argv, ['Arg0'] - try: - st = worker.set_process_status('Running') - self.assertIn('celeryd', st) - self.assertIn('xyzza', st) - self.assertIn('Running', st) - prev2, sys.argv = sys.argv, ['Arg0', 'Arg1'] - try: - st = worker.set_process_status('Running') - self.assertIn('celeryd', st) - self.assertIn('xyzza', st) - self.assertIn('Running', st) - self.assertIn('Arg1', st) - finally: - sys.argv = prev2 - finally: - sys.argv = prev1 - - @disable_stdouts - def test_parse_options(self): - cmd = worker() - cmd.app = self.app - opts, args = cmd.parse_options('worker', ['--concurrency=512', - '--heartbeat-interval=10']) - self.assertEqual(opts.concurrency, 512) - self.assertEqual(opts.heartbeat_interval, 10) - - @disable_stdouts - def test_main(self): - p, cd.Worker = cd.Worker, Worker - s, sys.argv = sys.argv, ['worker', '--discard'] - try: - worker_main(app=self.app) - finally: - cd.Worker = p - sys.argv = s - - -class test_signal_handlers(WorkerAppCase): - - class _Worker(object): - stopped = False - terminated = False - - def stop(self, in_sighandler=False): - self.stopped = True - - def terminate(self, in_sighandler=False): - self.terminated = True - - def psig(self, fun, *args, **kwargs): - handlers = {} - - class Signals(platforms.Signals): - def __setitem__(self, sig, handler): - handlers[sig] = handler - - p, platforms.signals = platforms.signals, Signals() - try: - fun(*args, **kwargs) - return handlers - finally: - platforms.signals = p - - @disable_stdouts - def test_worker_int_handler(self): - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - next_handlers = {} - state.should_stop = False - state.should_terminate = False - - class Signals(platforms.Signals): - - def __setitem__(self, sig, handler): - next_handlers[sig] = handler - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - p, platforms.signals = platforms.signals, Signals() - try: - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_stop) - finally: - platforms.signals = p - state.should_stop = False - - try: - next_handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - p, platforms.signals = platforms.signals, Signals() - try: - with self.assertRaises(WorkerShutdown): - handlers['SIGINT']('SIGINT', object()) - finally: - platforms.signals = p - - with self.assertRaises(WorkerTerminate): - next_handlers['SIGINT']('SIGINT', object()) - - @disable_stdouts - def test_worker_int_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for 
multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - handlers['SIGINT']('SIGINT', object()) - self.assertTrue(state.should_stop) - finally: - process.name = name - state.should_stop = False - - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_int_handler, worker) - with self.assertRaises(WorkerShutdown): - handlers['SIGINT']('SIGINT', object()) - finally: - process.name = name - state.should_stop = False - - @disable_stdouts - def test_install_HUP_not_supported_handler(self): - worker = self._Worker() - handlers = self.psig(cd.install_HUP_not_supported_handler, worker) - handlers['SIGHUP']('SIGHUP', object()) - - @disable_stdouts - def test_worker_term_hard_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - try: - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig( - cd.install_worker_term_hard_handler, worker) - try: - handlers['SIGQUIT']('SIGQUIT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig( - cd.install_worker_term_hard_handler, worker) - with self.assertRaises(WorkerTerminate): - handlers['SIGQUIT']('SIGQUIT', object()) - finally: - process.name = name - - @disable_stdouts - def test_worker_term_handler_when_threads(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - try: - handlers['SIGTERM']('SIGTERM', object()) - self.assertTrue(state.should_stop) - finally: - state.should_stop = False - - @disable_stdouts - def test_worker_term_handler_when_single_thread(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - try: - with self.assertRaises(WorkerShutdown): - handlers['SIGTERM']('SIGTERM', object()) - finally: - state.should_stop = False - - @patch('sys.__stderr__') - @skip_if_pypy - @skip_if_jython - def test_worker_cry_handler(self, stderr): - handlers = self.psig(cd.install_cry_handler) - self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object())) - self.assertTrue(stderr.write.called) - - @disable_stdouts - def test_worker_term_handler_only_stop_MainProcess(self): - try: - import _multiprocessing # noqa - except ImportError: - raise SkipTest('only relevant for multiprocessing') - process = current_process() - name, process.name = process.name, 'OtherProcess' - try: - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_handler, worker) - handlers['SIGTERM']('SIGTERM', object()) - self.assertTrue(state.should_stop) - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = 
self.psig(cd.install_worker_term_handler, worker) - with self.assertRaises(WorkerShutdown): - handlers['SIGTERM']('SIGTERM', object()) - finally: - process.name = name - state.should_stop = False - - @disable_stdouts - @patch('celery.platforms.close_open_fds') - @patch('atexit.register') - @patch('os.close') - def test_worker_restart_handler(self, _close, register, close_open): - if getattr(os, 'execv', None) is None: - raise SkipTest('platform does not have execv') - argv = [] - - def _execv(*args): - argv.extend(args) - - execv, os.execv = os.execv, _execv - try: - worker = self._Worker() - handlers = self.psig(cd.install_worker_restart_handler, worker) - handlers['SIGHUP']('SIGHUP', object()) - self.assertTrue(state.should_stop) - self.assertTrue(register.called) - callback = register.call_args[0][0] - callback() - self.assertTrue(argv) - finally: - os.execv = execv - state.should_stop = False - - @disable_stdouts - def test_worker_term_hard_handler_when_threaded(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 3 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_hard_handler, worker) - try: - handlers['SIGQUIT']('SIGQUIT', object()) - self.assertTrue(state.should_terminate) - finally: - state.should_terminate = False - - @disable_stdouts - def test_worker_term_hard_handler_when_single_threaded(self): - with patch('celery.apps.worker.active_thread_count') as c: - c.return_value = 1 - worker = self._Worker() - handlers = self.psig(cd.install_worker_term_hard_handler, worker) - with self.assertRaises(WorkerTerminate): - handlers['SIGQUIT']('SIGQUIT', object()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/case.py deleted file mode 100644 index a9e65cd..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/case.py +++ /dev/null @@ -1,880 +0,0 @@ -from __future__ import absolute_import - -try: - import unittest # noqa - unittest.skip - from unittest.util import safe_repr, unorderable_list_difference -except AttributeError: - import unittest2 as unittest # noqa - from unittest2.util import safe_repr, unorderable_list_difference # noqa - -import importlib -import inspect -import logging -import numbers -import os -import platform -import re -import sys -import threading -import time -import types -import warnings - -from contextlib import contextmanager -from copy import deepcopy -from datetime import datetime, timedelta -from functools import partial, wraps -from types import ModuleType - -try: - from unittest import mock -except ImportError: - import mock # noqa -from nose import SkipTest -from kombu import Queue -from kombu.log import NullHandler -from kombu.utils import nested, symbol_by_name - -from celery import Celery -from celery.app import current_app -from celery.backends.cache import CacheBackend, DummyClient -from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning -from celery.five import ( - WhateverIO, builtins, items, reraise, - string_t, values, open_fqdn, -) -from celery.utils.functional import noop -from celery.utils.imports import qualname - -__all__ = [ - 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', - 'patch', 'call', 'sentinel', 'skip_unless_module', - 'wrap_logger', 'with_environ', 'sleepdeprived', - 'skip_if_environ', 'todo', 'skip', 'skip_if', - 'skip_unless', 'mask_modules', 'override_stdouts', 'mock_module', - 'replace_module_value', 'sys_platform', 'reset_modules', - 'patch_modules', 
'mock_context', 'mock_open', 'patch_many', - 'assert_signal_called', 'skip_if_pypy', - 'skip_if_jython', 'body_from_sig', 'restore_logging', -] -patch = mock.patch -call = mock.call -sentinel = mock.sentinel -MagicMock = mock.MagicMock -ANY = mock.ANY - -PY3 = sys.version_info[0] == 3 - -CASE_REDEFINES_SETUP = """\ -{name} (subclass of AppCase) redefines private "setUp", should be: "setup"\ -""" -CASE_REDEFINES_TEARDOWN = """\ -{name} (subclass of AppCase) redefines private "tearDown", \ -should be: "teardown"\ -""" -CASE_LOG_REDIRECT_EFFECT = """\ -Test {0} did not disable LoggingProxy for {1}\ -""" -CASE_LOG_LEVEL_EFFECT = """\ -Test {0} Modified the level of the root logger\ -""" -CASE_LOG_HANDLER_EFFECT = """\ -Test {0} Modified handlers for the root logger\ -""" - -CELERY_TEST_CONFIG = { - #: Don't want log output when running suite. - 'CELERYD_HIJACK_ROOT_LOGGER': False, - 'CELERY_SEND_TASK_ERROR_EMAILS': False, - 'CELERY_DEFAULT_QUEUE': 'testcelery', - 'CELERY_DEFAULT_EXCHANGE': 'testcelery', - 'CELERY_DEFAULT_ROUTING_KEY': 'testcelery', - 'CELERY_QUEUES': ( - Queue('testcelery', routing_key='testcelery'), - ), - 'CELERY_ENABLE_UTC': True, - 'CELERY_TIMEZONE': 'UTC', - 'CELERYD_LOG_COLOR': False, - - # Mongo results tests (only executed if installed and running) - 'CELERY_MONGODB_BACKEND_SETTINGS': { - 'host': os.environ.get('MONGO_HOST') or 'localhost', - 'port': os.environ.get('MONGO_PORT') or 27017, - 'database': os.environ.get('MONGO_DB') or 'celery_unittests', - 'taskmeta_collection': (os.environ.get('MONGO_TASKMETA_COLLECTION') or - 'taskmeta_collection'), - 'user': os.environ.get('MONGO_USER'), - 'password': os.environ.get('MONGO_PASSWORD'), - } -} - - -class Trap(object): - - def __getattr__(self, name): - raise RuntimeError('Test depends on current_app') - - -class UnitLogging(symbol_by_name(Celery.log_cls)): - - def __init__(self, *args, **kwargs): - super(UnitLogging, self).__init__(*args, **kwargs) - self.already_setup = True - - -def UnitApp(name=None, broker=None, backend=None, - set_as_current=False, log=UnitLogging, **kwargs): - - app = Celery(name or 'celery.tests', - broker=broker or 'memory://', - backend=backend or 'cache+memory://', - set_as_current=set_as_current, - log=log, - **kwargs) - app.add_defaults(deepcopy(CELERY_TEST_CONFIG)) - return app - - -class Mock(mock.Mock): - - def __init__(self, *args, **kwargs): - attrs = kwargs.pop('attrs', None) or {} - super(Mock, self).__init__(*args, **kwargs) - for attr_name, attr_value in items(attrs): - setattr(self, attr_name, attr_value) - - -class _ContextMock(Mock): - """Dummy class implementing __enter__ and __exit__ - as the with statement requires these to be implemented - in the class, not just the instance.""" - - def __enter__(self): - pass - - def __exit__(self, *exc_info): - pass - - -def ContextMock(*args, **kwargs): - obj = _ContextMock(*args, **kwargs) - obj.attach_mock(_ContextMock(), '__enter__') - obj.attach_mock(_ContextMock(), '__exit__') - obj.__enter__.return_value = obj - # if __exit__ return a value the exception is ignored, - # so it must return None here. 
- obj.__exit__.return_value = None - return obj - - -def _bind(f, o): - @wraps(f) - def bound_meth(*fargs, **fkwargs): - return f(o, *fargs, **fkwargs) - return bound_meth - - -if PY3: # pragma: no cover - def _get_class_fun(meth): - return meth -else: - def _get_class_fun(meth): - return meth.__func__ - - -class MockCallbacks(object): - - def __new__(cls, *args, **kwargs): - r = Mock(name=cls.__name__) - _get_class_fun(cls.__init__)(r, *args, **kwargs) - for key, value in items(vars(cls)): - if key not in ('__dict__', '__weakref__', '__new__', '__init__'): - if inspect.ismethod(value) or inspect.isfunction(value): - r.__getattr__(key).side_effect = _bind(value, r) - else: - r.__setattr__(key, value) - return r - - -def skip_unless_module(module): - - def _inner(fun): - - @wraps(fun) - def __inner(*args, **kwargs): - try: - importlib.import_module(module) - except ImportError: - raise SkipTest('Does not have %s' % (module, )) - - return fun(*args, **kwargs) - - return __inner - return _inner - - -# -- adds assertWarns from recent unittest2, not in Python 2.7. - -class _AssertRaisesBaseContext(object): - - def __init__(self, expected, test_case, callable_obj=None, - expected_regex=None): - self.expected = expected - self.failureException = test_case.failureException - self.obj_name = None - if isinstance(expected_regex, string_t): - expected_regex = re.compile(expected_regex) - self.expected_regex = expected_regex - - -def _is_magic_module(m): - # some libraries create custom module types that are lazily - # loaded, e.g. Django installs some modules in sys.modules that - # will load _tkinter and other shit when touched. - - # pyflakes refuses to accept 'noqa' for this isinstance. - cls, modtype = type(m), types.ModuleType - try: - variables = vars(cls) - except TypeError: - return True - else: - return (cls is not modtype and ( - '__getattr__' in variables or - '__getattribute__' in variables)) - - -class _AssertWarnsContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertWarns* methods.""" - - def __enter__(self): - # The __warningregistry__'s need to be in a pristine state for tests - # to work properly. - warnings.resetwarnings() - for v in list(values(sys.modules)): - # do not evaluate Django moved modules and other lazily - # initialized modules. 
- if v and not _is_magic_module(v): - # use raw __getattribute__ to protect even better from - # lazily loaded modules - try: - object.__getattribute__(v, '__warningregistry__') - except AttributeError: - pass - else: - object.__setattr__(v, '__warningregistry__', {}) - self.warnings_manager = warnings.catch_warnings(record=True) - self.warnings = self.warnings_manager.__enter__() - warnings.simplefilter('always', self.expected) - return self - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - first_matching = None - for m in self.warnings: - w = m.message - if not isinstance(w, self.expected): - continue - if first_matching is None: - first_matching = w - if (self.expected_regex is not None and - not self.expected_regex.search(str(w))): - continue - # store warning for later retrieval - self.warning = w - self.filename = m.filename - self.lineno = m.lineno - return - # Now we simply try to choose a helpful failure message - if first_matching is not None: - raise self.failureException( - '%r does not match %r' % ( - self.expected_regex.pattern, str(first_matching))) - if self.obj_name: - raise self.failureException( - '%s not triggered by %s' % (exc_name, self.obj_name)) - else: - raise self.failureException('%s not triggered' % exc_name) - - -class Case(unittest.TestCase): - - def assertWarns(self, expected_warning): - return _AssertWarnsContext(expected_warning, self, None) - - def assertWarnsRegex(self, expected_warning, expected_regex): - return _AssertWarnsContext(expected_warning, self, - None, expected_regex) - - @contextmanager - def assertDeprecated(self): - with self.assertWarnsRegex(CDeprecationWarning, - r'scheduled for removal'): - yield - - @contextmanager - def assertPendingDeprecation(self): - with self.assertWarnsRegex(CPendingDeprecationWarning, - r'scheduled for deprecation'): - yield - - def assertDictContainsSubset(self, expected, actual, msg=None): - missing, mismatched = [], [] - - for key, value in items(expected): - if key not in actual: - missing.append(key) - elif value != actual[key]: - mismatched.append('%s, expected: %s, actual: %s' % ( - safe_repr(key), safe_repr(value), - safe_repr(actual[key]))) - - if not (missing or mismatched): - return - - standard_msg = '' - if missing: - standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing)) - - if mismatched: - if standard_msg: - standard_msg += '; ' - standard_msg += 'Mismatched values: %s' % ( - ','.join(mismatched)) - - self.fail(self._formatMessage(msg, standard_msg)) - - def assertItemsEqual(self, expected_seq, actual_seq, msg=None): - missing = unexpected = None - try: - expected = sorted(expected_seq) - actual = sorted(actual_seq) - except TypeError: - # Unsortable items (example: set(), complex(), ...) 
- expected = list(expected_seq) - actual = list(actual_seq) - missing, unexpected = unorderable_list_difference( - expected, actual) - else: - return self.assertSequenceEqual(expected, actual, msg=msg) - - errors = [] - if missing: - errors.append( - 'Expected, but missing:\n %s' % (safe_repr(missing), ) - ) - if unexpected: - errors.append( - 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ) - ) - if errors: - standardMsg = '\n'.join(errors) - self.fail(self._formatMessage(msg, standardMsg)) - - -def depends_on_current_app(fun): - if inspect.isclass(fun): - fun.contained = False - else: - @wraps(fun) - def __inner(self, *args, **kwargs): - self.app.set_current() - return fun(self, *args, **kwargs) - return __inner - - -class AppCase(Case): - contained = True - - def __init__(self, *args, **kwargs): - super(AppCase, self).__init__(*args, **kwargs) - if self.__class__.__dict__.get('setUp'): - raise RuntimeError( - CASE_REDEFINES_SETUP.format(name=qualname(self)), - ) - if self.__class__.__dict__.get('tearDown'): - raise RuntimeError( - CASE_REDEFINES_TEARDOWN.format(name=qualname(self)), - ) - - def Celery(self, *args, **kwargs): - return UnitApp(*args, **kwargs) - - def setUp(self): - self._threads_at_setup = list(threading.enumerate()) - from celery import _state - from celery import result - result.task_join_will_block = \ - _state.task_join_will_block = lambda: False - self._current_app = current_app() - self._default_app = _state.default_app - trap = Trap() - self._prev_tls = _state._tls - _state.set_default_app(trap) - - class NonTLS(object): - current_app = trap - _state._tls = NonTLS() - - self.app = self.Celery(set_as_current=False) - if not self.contained: - self.app.set_current() - root = logging.getLogger() - self.__rootlevel = root.level - self.__roothandlers = root.handlers - _state._set_task_join_will_block(False) - try: - self.setup() - except: - self._teardown_app() - raise - - def _teardown_app(self): - from celery.utils.log import LoggingProxy - assert sys.stdout - assert sys.stderr - assert sys.__stdout__ - assert sys.__stderr__ - this = self._get_test_name() - if isinstance(sys.stdout, (LoggingProxy, Mock)) or \ - isinstance(sys.__stdout__, (LoggingProxy, Mock)): - raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) - if isinstance(sys.stderr, (LoggingProxy, Mock)) or \ - isinstance(sys.__stderr__, (LoggingProxy, Mock)): - raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr')) - backend = self.app.__dict__.get('backend') - if backend is not None: - if isinstance(backend, CacheBackend): - if isinstance(backend.client, DummyClient): - backend.client.cache.clear() - backend._cache.clear() - from celery import _state - _state._set_task_join_will_block(False) - - _state.set_default_app(self._default_app) - _state._tls = self._prev_tls - _state._tls.current_app = self._current_app - if self.app is not self._current_app: - self.app.close() - self.app = None - self.assertEqual( - self._threads_at_setup, list(threading.enumerate()), - ) - - def _get_test_name(self): - return '.'.join([self.__class__.__name__, self._testMethodName]) - - def tearDown(self): - try: - self.teardown() - finally: - self._teardown_app() - self.assert_no_logging_side_effect() - - def assert_no_logging_side_effect(self): - this = self._get_test_name() - root = logging.getLogger() - if root.level != self.__rootlevel: - raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) - if root.handlers != self.__roothandlers: - raise 
RuntimeError(CASE_LOG_HANDLER_EFFECT.format(this)) - - def setup(self): - pass - - def teardown(self): - pass - - -def get_handlers(logger): - return [h for h in logger.handlers if not isinstance(h, NullHandler)] - - -@contextmanager -def wrap_logger(logger, loglevel=logging.ERROR): - old_handlers = get_handlers(logger) - sio = WhateverIO() - siohandler = logging.StreamHandler(sio) - logger.handlers = [siohandler] - - try: - yield sio - finally: - logger.handlers = old_handlers - - -def with_environ(env_name, env_value): - - def _envpatched(fun): - - @wraps(fun) - def _patch_environ(*args, **kwargs): - prev_val = os.environ.get(env_name) - os.environ[env_name] = env_value - try: - return fun(*args, **kwargs) - finally: - os.environ[env_name] = prev_val or '' - - return _patch_environ - return _envpatched - - -def sleepdeprived(module=time): - - def _sleepdeprived(fun): - - @wraps(fun) - def __sleepdeprived(*args, **kwargs): - old_sleep = module.sleep - module.sleep = noop - try: - return fun(*args, **kwargs) - finally: - module.sleep = old_sleep - - return __sleepdeprived - - return _sleepdeprived - - -def skip_if_environ(env_var_name): - - def _wrap_test(fun): - - @wraps(fun) - def _skips_if_environ(*args, **kwargs): - if os.environ.get(env_var_name): - raise SkipTest('SKIP %s: %s set\n' % ( - fun.__name__, env_var_name)) - return fun(*args, **kwargs) - - return _skips_if_environ - - return _wrap_test - - -def _skip_test(reason, sign): - - def _wrap_test(fun): - - @wraps(fun) - def _skipped_test(*args, **kwargs): - raise SkipTest('%s: %s' % (sign, reason)) - - return _skipped_test - return _wrap_test - - -def todo(reason): - """TODO test decorator.""" - return _skip_test(reason, 'TODO') - - -def skip(reason): - """Skip test decorator.""" - return _skip_test(reason, 'SKIP') - - -def skip_if(predicate, reason): - """Skip test if predicate is :const:`True`.""" - - def _inner(fun): - return predicate and skip(reason)(fun) or fun - - return _inner - - -def skip_unless(predicate, reason): - """Skip test if predicate is :const:`False`.""" - return skip_if(not predicate, reason) - - -# Taken from -# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py -@contextmanager -def mask_modules(*modnames): - """Ban some modules from being importable inside the context - - For example: - - >>> with mask_modules('sys'): - ... try: - ... import sys - ... except ImportError: - ... 
print('sys not found') - sys not found - - >>> import sys # noqa - >>> sys.version - (2, 5, 2, 'final', 0) - - """ - - realimport = builtins.__import__ - - def myimp(name, *args, **kwargs): - if name in modnames: - raise ImportError('No module named %s' % name) - else: - return realimport(name, *args, **kwargs) - - builtins.__import__ = myimp - try: - yield True - finally: - builtins.__import__ = realimport - - -@contextmanager -def override_stdouts(): - """Override `sys.stdout` and `sys.stderr` with `WhateverIO`.""" - prev_out, prev_err = sys.stdout, sys.stderr - prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__ - mystdout, mystderr = WhateverIO(), WhateverIO() - sys.stdout = sys.__stdout__ = mystdout - sys.stderr = sys.__stderr__ = mystderr - - try: - yield mystdout, mystderr - finally: - sys.stdout = prev_out - sys.stderr = prev_err - sys.__stdout__ = prev_rout - sys.__stderr__ = prev_rerr - - -def disable_stdouts(fun): - - @wraps(fun) - def disable(*args, **kwargs): - with override_stdouts(): - return fun(*args, **kwargs) - return disable - - -def _old_patch(module, name, mocked): - module = importlib.import_module(module) - - def _patch(fun): - - @wraps(fun) - def __patched(*args, **kwargs): - prev = getattr(module, name) - setattr(module, name, mocked) - try: - return fun(*args, **kwargs) - finally: - setattr(module, name, prev) - return __patched - return _patch - - -@contextmanager -def replace_module_value(module, name, value=None): - has_prev = hasattr(module, name) - prev = getattr(module, name, None) - if value: - setattr(module, name, value) - else: - try: - delattr(module, name) - except AttributeError: - pass - try: - yield - finally: - if prev is not None: - setattr(module, name, prev) - if not has_prev: - try: - delattr(module, name) - except AttributeError: - pass -pypy_version = partial( - replace_module_value, sys, 'pypy_version_info', -) -platform_pyimp = partial( - replace_module_value, platform, 'python_implementation', -) - - -@contextmanager -def sys_platform(value): - prev, sys.platform = sys.platform, value - try: - yield - finally: - sys.platform = prev - - -@contextmanager -def reset_modules(*modules): - prev = dict((k, sys.modules.pop(k)) for k in modules if k in sys.modules) - try: - yield - finally: - sys.modules.update(prev) - - -@contextmanager -def patch_modules(*modules): - prev = {} - for mod in modules: - prev[mod] = sys.modules.get(mod) - sys.modules[mod] = ModuleType(mod) - try: - yield - finally: - for name, mod in items(prev): - if mod is None: - sys.modules.pop(name, None) - else: - sys.modules[name] = mod - - -@contextmanager -def mock_module(*names): - prev = {} - - class MockModule(ModuleType): - - def __getattr__(self, attr): - setattr(self, attr, Mock()) - return ModuleType.__getattribute__(self, attr) - - mods = [] - for name in names: - try: - prev[name] = sys.modules[name] - except KeyError: - pass - mod = sys.modules[name] = MockModule(name) - mods.append(mod) - try: - yield mods - finally: - for name in names: - try: - sys.modules[name] = prev[name] - except KeyError: - try: - del(sys.modules[name]) - except KeyError: - pass - - -@contextmanager -def mock_context(mock, typ=Mock): - context = mock.return_value = Mock() - context.__enter__ = typ() - context.__exit__ = typ() - - def on_exit(*x): - if x[0]: - reraise(x[0], x[1], x[2]) - context.__exit__.side_effect = on_exit - context.__enter__.return_value = context - try: - yield context - finally: - context.reset() - - -@contextmanager -def mock_open(typ=WhateverIO, side_effect=None): 
- with patch(open_fqdn) as open_: - with mock_context(open_) as context: - if side_effect is not None: - context.__enter__.side_effect = side_effect - val = context.__enter__.return_value = typ() - val.__exit__ = Mock() - yield val - - -def patch_many(*targets): - return nested(*[patch(target) for target in targets]) - - -@contextmanager -def assert_signal_called(signal, **expected): - handler = Mock() - call_handler = partial(handler) - signal.connect(call_handler) - try: - yield handler - finally: - signal.disconnect(call_handler) - handler.assert_called_with(signal=signal, **expected) - - -def skip_if_pypy(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - if getattr(sys, 'pypy_version_info', None): - raise SkipTest('does not work on PyPy') - return fun(*args, **kwargs) - return _inner - - -def skip_if_jython(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - if sys.platform.startswith('java'): - raise SkipTest('does not work on Jython') - return fun(*args, **kwargs) - return _inner - - -def body_from_sig(app, sig, utc=True): - sig.freeze() - callbacks = sig.options.pop('link', None) - errbacks = sig.options.pop('link_error', None) - countdown = sig.options.pop('countdown', None) - if countdown: - eta = app.now() + timedelta(seconds=countdown) - else: - eta = sig.options.pop('eta', None) - if eta and isinstance(eta, datetime): - eta = eta.isoformat() - expires = sig.options.pop('expires', None) - if expires and isinstance(expires, numbers.Real): - expires = app.now() + timedelta(seconds=expires) - if expires and isinstance(expires, datetime): - expires = expires.isoformat() - return { - 'task': sig.task, - 'id': sig.id, - 'args': sig.args, - 'kwargs': sig.kwargs, - 'callbacks': [dict(s) for s in callbacks] if callbacks else None, - 'errbacks': [dict(s) for s in errbacks] if errbacks else None, - 'eta': eta, - 'utc': utc, - 'expires': expires, - } - - -@contextmanager -def restore_logging(): - outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ - root = logging.getLogger() - level = root.level - handlers = root.handlers - - try: - yield - finally: - sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs - root.level = level - root.handlers[:] = handlers diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py deleted file mode 100644 index 02c7f7d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import absolute_import - -from datetime import timedelta - -import sys -sys.modules.pop('celery.task', None) - -from celery.schedules import schedule # noqa -from celery.task import ( # noqa - periodic_task, - PeriodicTask -) -from celery.utils.timeutils import timedelta_seconds # noqa - -from celery.tests.case import AppCase, depends_on_current_app # noqa - - -class test_Task(AppCase): - - def test_base_task_inherits_magic_kwargs_from_app(self): - from celery.task import Task as OldTask - - class timkX(OldTask): - abstract = True - - with self.Celery(set_as_current=False, - accept_magic_kwargs=True) as app: - timkX.bind(app) - # see #918 - self.assertFalse(timkX.accept_magic_kwargs) - - from celery import Task as NewTask - - class timkY(NewTask): - abstract = True - - timkY.bind(app) - self.assertFalse(timkY.accept_magic_kwargs) - - -@depends_on_current_app -class test_periodic_tasks(AppCase): - - def setup(self): - @periodic_task(app=self.app, shared=False, - 
run_every=schedule(timedelta(hours=1), app=self.app)) - def my_periodic(): - pass - self.my_periodic = my_periodic - - def now(self): - return self.app.now() - - def test_must_have_run_every(self): - with self.assertRaises(NotImplementedError): - type('Foo', (PeriodicTask, ), {'__module__': __name__}) - - def test_remaining_estimate(self): - s = self.my_periodic.run_every - self.assertIsInstance( - s.remaining_estimate(s.maybe_make_aware(self.now())), - timedelta) - - def test_is_due_not_due(self): - due, remaining = self.my_periodic.run_every.is_due(self.now()) - self.assertFalse(due) - # This assertion may fail if executed in the - # first minute of an hour, thus 59 instead of 60 - self.assertGreater(remaining, 59) - - def test_is_due(self): - p = self.my_periodic - due, remaining = p.run_every.is_due( - self.now() - p.run_every.run_every, - ) - self.assertTrue(due) - self.assertEqual(remaining, - timedelta_seconds(p.run_every.run_every)) - - def test_schedule_repr(self): - p = self.my_periodic - self.assertTrue(repr(p.run_every)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py deleted file mode 100644 index b041a0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import absolute_import - -import celery - -from celery.app.task import Task as ModernTask -from celery.task.base import Task as CompatTask - -from celery.tests.case import AppCase, depends_on_current_app - - -@depends_on_current_app -class test_MagicModule(AppCase): - - def test_class_property_set_without_type(self): - self.assertTrue(ModernTask.__dict__['app'].__get__(CompatTask())) - - def test_class_property_set_on_class(self): - self.assertIs(ModernTask.__dict__['app'].__set__(None, None), - ModernTask.__dict__['app']) - - def test_class_property_set(self): - - class X(CompatTask): - pass - ModernTask.__dict__['app'].__set__(X(), self.app) - self.assertIs(X.app, self.app) - - def test_dir(self): - self.assertTrue(dir(celery.messaging)) - - def test_direct(self): - self.assertTrue(celery.task) - - def test_app_attrs(self): - self.assertEqual(celery.task.control.broadcast, - celery.current_app.control.broadcast) - - def test_decorators_task(self): - @celery.decorators.task - def _test_decorators_task(): - pass - - self.assertTrue(_test_decorators_task.accept_magic_kwargs) - - def test_decorators_periodic_task(self): - @celery.decorators.periodic_task(run_every=3600) - def _test_decorators_ptask(): - pass - - self.assertTrue(_test_decorators_ptask.accept_magic_kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py deleted file mode 100644 index 9f5dff9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import absolute_import - -import warnings - -from celery.task import base - -from celery.tests.case import AppCase, depends_on_current_app - - -def add(x, y): - return x + y - - -@depends_on_current_app -class test_decorators(AppCase): - - def test_task_alias(self): - from celery import task - self.assertTrue(task.__file__) - self.assertTrue(task(add)) - - def setup(self): - with warnings.catch_warnings(record=True): - from celery import decorators - 
self.decorators = decorators - - def assertCompatDecorator(self, decorator, type, **opts): - task = decorator(**opts)(add) - self.assertEqual(task(8, 8), 16) - self.assertTrue(task.accept_magic_kwargs) - self.assertIsInstance(task, type) - - def test_task(self): - self.assertCompatDecorator(self.decorators.task, base.BaseTask) - - def test_periodic_task(self): - self.assertCompatDecorator(self.decorators.periodic_task, - base.BaseTask, - run_every=1) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py deleted file mode 100644 index 08505f8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, unicode_literals - -from contextlib import contextmanager -from functools import wraps -try: - from urllib import addinfourl -except ImportError: # py3k - from urllib.request import addinfourl # noqa - -from anyjson import dumps -from kombu.utils.encoding import from_utf8 - -from celery.five import WhateverIO, items -from celery.task import http -from celery.tests.case import AppCase, Case - - -@contextmanager -def mock_urlopen(response_method): - - urlopen = http.urlopen - - @wraps(urlopen) - def _mocked(url, *args, **kwargs): - response_data, headers = response_method(url) - return addinfourl(WhateverIO(response_data), headers, url) - - http.urlopen = _mocked - - try: - yield True - finally: - http.urlopen = urlopen - - -def _response(res): - return lambda r: (res, []) - - -def success_response(value): - return _response(dumps({'status': 'success', 'retval': value})) - - -def fail_response(reason): - return _response(dumps({'status': 'failure', 'reason': reason})) - - -def unknown_response(): - return _response(dumps({'status': 'u.u.u.u', 'retval': True})) - - -class test_encodings(Case): - - def test_utf8dict(self): - uk = 'foobar' - d = {'følelser ær langé': 'ærbadægzaå寨Å', - from_utf8(uk): from_utf8('xuzzybaz')} - - for key, value in items(http.utf8dict(items(d))): - self.assertIsInstance(key, str) - self.assertIsInstance(value, str) - - -class test_MutableURL(Case): - - def test_url_query(self): - url = http.MutableURL('http://example.com?x=10&y=20&z=Foo') - self.assertDictContainsSubset({'x': '10', - 'y': '20', - 'z': 'Foo'}, url.query) - url.query['name'] = 'George' - url = http.MutableURL(str(url)) - self.assertDictContainsSubset({'x': '10', - 'y': '20', - 'z': 'Foo', - 'name': 'George'}, url.query) - - def test_url_keeps_everything(self): - url = 'https://e.com:808/foo/bar#zeta?x=10&y=20' - url = http.MutableURL(url) - - self.assertEqual( - str(url).split('?')[0], - 'https://e.com:808/foo/bar#zeta', - ) - - def test___repr__(self): - url = http.MutableURL('http://e.com/foo/bar') - self.assertTrue(repr(url).startswith(' 50: - return True - raise err - finally: - called[0] += 1 - sock.return_value.bind.side_effect = effect - with Rdb(out=out): - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py deleted file mode 100644 index c8e6151..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py +++ /dev/null @@ -1,70 +0,0 @@ -from __future__ import absolute_import - -from celery.tests.case import AppCase, SkipTest - - -class MockWindow(object): - - def getmaxyx(self): - return self.y, self.x 
- - -class test_CursesDisplay(AppCase): - - def setup(self): - try: - import curses # noqa - except ImportError: - raise SkipTest('curses monitor requires curses') - - from celery.events import cursesmon - self.monitor = cursesmon.CursesMonitor(object(), app=self.app) - self.win = MockWindow() - self.monitor.win = self.win - - def test_format_row_with_default_widths(self): - self.win.x, self.win.y = 91, 24 - row = self.monitor.format_row( - '783da208-77d0-40ca-b3d6-37dd6dbb55d3', - 'task.task.task.task.task.task.task.task.task.tas', - 'workerworkerworkerworkerworkerworkerworkerworker', - '21:13:20', - 'SUCCESS') - self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' - 'workerworker... task.task.[.]tas 21:13:20 SUCCESS ', - row) - - def test_format_row_with_truncated_uuid(self): - self.win.x, self.win.y = 80, 24 - row = self.monitor.format_row( - '783da208-77d0-40ca-b3d6-37dd6dbb55d3', - 'task.task.task.task.task.task.task.task.task.tas', - 'workerworkerworkerworkerworkerworkerworkerworker', - '21:13:20', - 'SUCCESS') - self.assertEqual('783da208-77d0-40ca-b3d... workerworker... ' - 'task.task.[.]tas 21:13:20 SUCCESS ', - row) - - def test_format_title_row(self): - self.win.x, self.win.y = 80, 24 - row = self.monitor.format_row('UUID', 'TASK', - 'WORKER', 'TIME', 'STATE') - self.assertEqual('UUID WORKER ' - 'TASK TIME STATE ', - row) - - def test_format_row_for_wide_screen_with_short_uuid(self): - self.win.x, self.win.y = 140, 24 - row = self.monitor.format_row( - '783da208-77d0-40ca-b3d6-37dd6dbb55d3', - 'task.task.task.task.task.task.task.task.task.tas', - 'workerworkerworkerworkerworkerworkerworkerworker', - '21:13:20', - 'SUCCESS') - self.assertEqual(136, len(row)) - self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' - 'workerworkerworkerworkerworkerworker... 
' - 'task.task.task.task.task.task.task.[.]tas ' - '21:13:20 SUCCESS ', - row) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py deleted file mode 100644 index 791f416..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py +++ /dev/null @@ -1,260 +0,0 @@ -from __future__ import absolute_import - -import socket - -from celery.events import Event -from celery.tests.case import AppCase, Mock - - -class MockProducer(object): - raise_on_publish = False - - def __init__(self, *args, **kwargs): - self.sent = [] - - def publish(self, msg, *args, **kwargs): - if self.raise_on_publish: - raise KeyError() - self.sent.append(msg) - - def close(self): - pass - - def has_event(self, kind): - for event in self.sent: - if event['type'] == kind: - return event - return False - - -class test_Event(AppCase): - - def test_constructor(self): - event = Event('world war II') - self.assertEqual(event['type'], 'world war II') - self.assertTrue(event['timestamp']) - - -class test_EventDispatcher(AppCase): - - def test_redis_uses_fanout_exchange(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock() - conn.transport.driver_type = 'redis' - - dispatcher = self.app.events.Dispatcher(conn, enabled=False) - self.assertEqual(dispatcher.exchange.type, 'fanout') - - def test_others_use_topic_exchange(self): - self.app.connection = Mock() - conn = self.app.connection.return_value = Mock() - conn.transport.driver_type = 'amqp' - dispatcher = self.app.events.Dispatcher(conn, enabled=False) - self.assertEqual(dispatcher.exchange.type, 'topic') - - def test_takes_channel_connection(self): - x = self.app.events.Dispatcher(channel=Mock()) - self.assertIs(x.connection, x.channel.connection.client) - - def test_sql_transports_disabled(self): - conn = Mock() - conn.transport.driver_type = 'sql' - x = self.app.events.Dispatcher(connection=conn) - self.assertFalse(x.enabled) - - def test_send(self): - producer = MockProducer() - producer.connection = self.app.connection() - connection = Mock() - connection.transport.driver_type = 'amqp' - eventer = self.app.events.Dispatcher(connection, enabled=False, - buffer_while_offline=False) - eventer.producer = producer - eventer.enabled = True - eventer.send('World War II', ended=True) - self.assertTrue(producer.has_event('World War II')) - eventer.enabled = False - eventer.send('World War III') - self.assertFalse(producer.has_event('World War III')) - - evs = ('Event 1', 'Event 2', 'Event 3') - eventer.enabled = True - eventer.producer.raise_on_publish = True - eventer.buffer_while_offline = False - with self.assertRaises(KeyError): - eventer.send('Event X') - eventer.buffer_while_offline = True - for ev in evs: - eventer.send(ev) - eventer.producer.raise_on_publish = False - eventer.flush() - for ev in evs: - self.assertTrue(producer.has_event(ev)) - - buf = eventer._outbound_buffer = Mock() - buf.popleft.side_effect = IndexError() - eventer.flush() - - def test_enter_exit(self): - with self.app.connection() as conn: - d = self.app.events.Dispatcher(conn) - d.close = Mock() - with d as _d: - self.assertTrue(_d) - d.close.assert_called_with() - - def test_enable_disable_callbacks(self): - on_enable = Mock() - on_disable = Mock() - with self.app.connection() as conn: - with self.app.events.Dispatcher(conn, enabled=False) as d: - d.on_enabled.add(on_enable) - d.on_disabled.add(on_disable) - d.enable() - 
on_enable.assert_called_with() - d.disable() - on_disable.assert_called_with() - - def test_enabled_disable(self): - connection = self.app.connection() - channel = connection.channel() - try: - dispatcher = self.app.events.Dispatcher(connection, - enabled=True) - dispatcher2 = self.app.events.Dispatcher(connection, - enabled=True, - channel=channel) - self.assertTrue(dispatcher.enabled) - self.assertTrue(dispatcher.producer.channel) - self.assertEqual(dispatcher.producer.serializer, - self.app.conf.CELERY_EVENT_SERIALIZER) - - created_channel = dispatcher.producer.channel - dispatcher.disable() - dispatcher.disable() # Disable with no active producer - dispatcher2.disable() - self.assertFalse(dispatcher.enabled) - self.assertIsNone(dispatcher.producer) - self.assertFalse(dispatcher2.channel.closed, - 'does not close manually provided channel') - - dispatcher.enable() - self.assertTrue(dispatcher.enabled) - self.assertTrue(dispatcher.producer) - - # XXX test compat attribute - self.assertIs(dispatcher.publisher, dispatcher.producer) - prev, dispatcher.publisher = dispatcher.producer, 42 - try: - self.assertEqual(dispatcher.producer, 42) - finally: - dispatcher.producer = prev - finally: - channel.close() - connection.close() - self.assertTrue(created_channel.closed) - - -class test_EventReceiver(AppCase): - - def test_process(self): - - message = {'type': 'world-war'} - - got_event = [False] - - def my_handler(event): - got_event[0] = True - - connection = Mock() - connection.transport_cls = 'memory' - r = self.app.events.Receiver( - connection, - handlers={'world-war': my_handler}, - node_id='celery.tests', - ) - r._receive(message, object()) - self.assertTrue(got_event[0]) - - def test_catch_all_event(self): - - message = {'type': 'world-war'} - - got_event = [False] - - def my_handler(event): - got_event[0] = True - - connection = Mock() - connection.transport_cls = 'memory' - r = self.app.events.Receiver(connection, node_id='celery.tests') - r.handlers['*'] = my_handler - r._receive(message, object()) - self.assertTrue(got_event[0]) - - def test_itercapture(self): - connection = self.app.connection() - try: - r = self.app.events.Receiver(connection, node_id='celery.tests') - it = r.itercapture(timeout=0.0001, wakeup=False) - - with self.assertRaises(socket.timeout): - next(it) - - with self.assertRaises(socket.timeout): - r.capture(timeout=0.00001) - finally: - connection.close() - - def test_event_from_message_localize_disabled(self): - r = self.app.events.Receiver(Mock(), node_id='celery.tests') - r.adjust_clock = Mock() - ts_adjust = Mock() - - r.event_from_message( - {'type': 'worker-online', 'clock': 313}, - localize=False, - adjust_timestamp=ts_adjust, - ) - self.assertFalse(ts_adjust.called) - r.adjust_clock.assert_called_with(313) - - def test_itercapture_limit(self): - connection = self.app.connection() - channel = connection.channel() - try: - events_received = [0] - - def handler(event): - events_received[0] += 1 - - producer = self.app.events.Dispatcher( - connection, enabled=True, channel=channel, - ) - r = self.app.events.Receiver( - connection, - handlers={'*': handler}, - node_id='celery.tests', - ) - evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5'] - for ev in evs: - producer.send(ev) - it = r.itercapture(limit=4, wakeup=True) - next(it) # skip consumer (see itercapture) - list(it) - self.assertEqual(events_received[0], 4) - finally: - channel.close() - connection.close() - - -class test_misc(AppCase): - - def test_State(self): - state = self.app.events.State() - 
self.assertDictEqual(dict(state.workers), {}) - - def test_default_dispatcher(self): - with self.app.events.default_dispatcher() as d: - self.assertTrue(d) - self.assertTrue(d.connection) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py deleted file mode 100644 index f551751..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py +++ /dev/null @@ -1,130 +0,0 @@ -from __future__ import absolute_import - -from celery.events import Events -from celery.events.snapshot import Polaroid, evcam -from celery.tests.case import AppCase, patch, restore_logging - - -class TRef(object): - active = True - called = False - - def __call__(self): - self.called = True - - def cancel(self): - self.active = False - - -class MockTimer(object): - installed = [] - - def call_repeatedly(self, secs, fun, *args, **kwargs): - self.installed.append(fun) - return TRef() -timer = MockTimer() - - -class test_Polaroid(AppCase): - - def setup(self): - self.state = self.app.events.State() - - def test_constructor(self): - x = Polaroid(self.state, app=self.app) - self.assertIs(x.app, self.app) - self.assertIs(x.state, self.state) - self.assertTrue(x.freq) - self.assertTrue(x.cleanup_freq) - self.assertTrue(x.logger) - self.assertFalse(x.maxrate) - - def test_install_timers(self): - x = Polaroid(self.state, app=self.app) - x.timer = timer - x.__exit__() - x.__enter__() - self.assertIn(x.capture, MockTimer.installed) - self.assertIn(x.cleanup, MockTimer.installed) - self.assertTrue(x._tref.active) - self.assertTrue(x._ctref.active) - x.__exit__() - self.assertFalse(x._tref.active) - self.assertFalse(x._ctref.active) - self.assertTrue(x._tref.called) - self.assertFalse(x._ctref.called) - - def test_cleanup(self): - x = Polaroid(self.state, app=self.app) - cleanup_signal_sent = [False] - - def handler(**kwargs): - cleanup_signal_sent[0] = True - - x.cleanup_signal.connect(handler) - x.cleanup() - self.assertTrue(cleanup_signal_sent[0]) - - def test_shutter__capture(self): - x = Polaroid(self.state, app=self.app) - shutter_signal_sent = [False] - - def handler(**kwargs): - shutter_signal_sent[0] = True - - x.shutter_signal.connect(handler) - x.shutter() - self.assertTrue(shutter_signal_sent[0]) - - shutter_signal_sent[0] = False - x.capture() - self.assertTrue(shutter_signal_sent[0]) - - def test_shutter_maxrate(self): - x = Polaroid(self.state, app=self.app, maxrate='1/h') - shutter_signal_sent = [0] - - def handler(**kwargs): - shutter_signal_sent[0] += 1 - - x.shutter_signal.connect(handler) - for i in range(30): - x.shutter() - x.shutter() - x.shutter() - self.assertEqual(shutter_signal_sent[0], 1) - - -class test_evcam(AppCase): - - class MockReceiver(object): - raise_keyboard_interrupt = False - - def capture(self, **kwargs): - if self.__class__.raise_keyboard_interrupt: - raise KeyboardInterrupt() - - class MockEvents(Events): - - def Receiver(self, *args, **kwargs): - return test_evcam.MockReceiver() - - def setup(self): - self.app.events = self.MockEvents() - self.app.events.app = self.app - - def test_evcam(self): - with restore_logging(): - evcam(Polaroid, timer=timer, app=self.app) - evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app) - self.MockReceiver.raise_keyboard_interrupt = True - try: - with self.assertRaises(SystemExit): - evcam(Polaroid, timer=timer, app=self.app) - finally: - self.MockReceiver.raise_keyboard_interrupt = False - - 
@patch('celery.platforms.create_pidlock') - def test_evcam_pidfile(self, create_pidlock): - evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app) - create_pidlock.assert_called_with('/var/pid') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py deleted file mode 100644 index aab54c4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py +++ /dev/null @@ -1,582 +0,0 @@ -from __future__ import absolute_import - -import pickle - -from decimal import Decimal -from random import shuffle -from time import time -from itertools import count - -from celery import states -from celery.events import Event -from celery.events.state import ( - State, - Worker, - Task, - HEARTBEAT_EXPIRE_WINDOW, - HEARTBEAT_DRIFT_MAX, -) -from celery.five import range -from celery.utils import uuid -from celery.tests.case import AppCase, Mock, patch - -try: - Decimal(2.6) -except TypeError: # pragma: no cover - # Py2.6: Must first convert float to str - _float_to_decimal = str -else: - def _float_to_decimal(f): # noqa - return f - - -class replay(object): - - def __init__(self, state): - self.state = state - self.rewind() - self.setup() - self.current_clock = 0 - - def setup(self): - pass - - def next_event(self): - ev = self.events[next(self.position)] - ev['local_received'] = ev['timestamp'] - try: - self.current_clock = ev['clock'] - except KeyError: - ev['clock'] = self.current_clock = self.current_clock + 1 - return ev - - def __iter__(self): - return self - - def __next__(self): - try: - self.state.event(self.next_event()) - except IndexError: - raise StopIteration() - next = __next__ - - def rewind(self): - self.position = count(0) - return self - - def play(self): - for _ in self: - pass - - -class ev_worker_online_offline(replay): - - def setup(self): - self.events = [ - Event('worker-online', hostname='utest1'), - Event('worker-offline', hostname='utest1'), - ] - - -class ev_worker_heartbeats(replay): - - def setup(self): - self.events = [ - Event('worker-heartbeat', hostname='utest1', - timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2), - Event('worker-heartbeat', hostname='utest1'), - ] - - -class ev_task_states(replay): - - def setup(self): - tid = self.tid = uuid() - self.events = [ - Event('task-received', uuid=tid, name='task1', - args='(2, 2)', kwargs="{'foo': 'bar'}", - retries=0, eta=None, hostname='utest1'), - Event('task-started', uuid=tid, hostname='utest1'), - Event('task-revoked', uuid=tid, hostname='utest1'), - Event('task-retried', uuid=tid, exception="KeyError('bar')", - traceback='line 2 at main', hostname='utest1'), - Event('task-failed', uuid=tid, exception="KeyError('foo')", - traceback='line 1 at main', hostname='utest1'), - Event('task-succeeded', uuid=tid, result='4', - runtime=0.1234, hostname='utest1'), - ] - - -def QTEV(type, uuid, hostname, clock, name=None, timestamp=None): - """Quick task event.""" - return Event('task-{0}'.format(type), uuid=uuid, hostname=hostname, - clock=clock, name=name, timestamp=timestamp or time()) - - -class ev_logical_clock_ordering(replay): - - def __init__(self, state, offset=0, uids=None): - self.offset = offset or 0 - self.uids = self.setuids(uids) - super(ev_logical_clock_ordering, self).__init__(state) - - def setuids(self, uids): - uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()] - return uids - - def setup(self): - offset = self.offset - tA, tB, tC = self.uids - self.events = [ - 
QTEV('received', tA, 'w1', name='tA', clock=offset + 1), - QTEV('received', tB, 'w2', name='tB', clock=offset + 1), - QTEV('started', tA, 'w1', name='tA', clock=offset + 3), - QTEV('received', tC, 'w2', name='tC', clock=offset + 3), - QTEV('started', tB, 'w2', name='tB', clock=offset + 5), - QTEV('retried', tA, 'w1', name='tA', clock=offset + 7), - QTEV('succeeded', tB, 'w2', name='tB', clock=offset + 9), - QTEV('started', tC, 'w2', name='tC', clock=offset + 10), - QTEV('received', tA, 'w3', name='tA', clock=offset + 13), - QTEV('succeded', tC, 'w2', name='tC', clock=offset + 12), - QTEV('started', tA, 'w3', name='tA', clock=offset + 14), - QTEV('succeeded', tA, 'w3', name='TA', clock=offset + 16), - ] - - def rewind_with_offset(self, offset, uids=None): - self.offset = offset - self.uids = self.setuids(uids or self.uids) - self.setup() - self.rewind() - - -class ev_snapshot(replay): - - def setup(self): - self.events = [ - Event('worker-online', hostname='utest1'), - Event('worker-online', hostname='utest2'), - Event('worker-online', hostname='utest3'), - ] - for i in range(20): - worker = not i % 2 and 'utest2' or 'utest1' - type = not i % 2 and 'task2' or 'task1' - self.events.append(Event('task-received', name=type, - uuid=uuid(), hostname=worker)) - - -class test_Worker(AppCase): - - def test_equality(self): - self.assertEqual(Worker(hostname='foo').hostname, 'foo') - self.assertEqual( - Worker(hostname='foo'), Worker(hostname='foo'), - ) - self.assertNotEqual( - Worker(hostname='foo'), Worker(hostname='bar'), - ) - self.assertEqual( - hash(Worker(hostname='foo')), hash(Worker(hostname='foo')), - ) - self.assertNotEqual( - hash(Worker(hostname='foo')), hash(Worker(hostname='bar')), - ) - - def test_compatible_with_Decimal(self): - w = Worker('george@vandelay.com') - timestamp, local_received = Decimal(_float_to_decimal(time())), time() - w.event('worker-online', timestamp, local_received, fields={ - 'hostname': 'george@vandelay.com', - 'timestamp': timestamp, - 'local_received': local_received, - 'freq': Decimal(_float_to_decimal(5.6335431)), - }) - self.assertTrue(w.alive) - - def test_survives_missing_timestamp(self): - worker = Worker(hostname='foo') - worker.event('heartbeat') - self.assertEqual(worker.heartbeats, []) - - def test_repr(self): - self.assertTrue(repr(Worker(hostname='foo'))) - - def test_drift_warning(self): - worker = Worker(hostname='foo') - with patch('celery.events.state.warn') as warn: - worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time()) - self.assertTrue(warn.called) - self.assertIn('Substantial drift', warn.call_args[0][0]) - - def test_updates_heartbeat(self): - worker = Worker(hostname='foo') - worker.event(None, time(), time()) - self.assertEqual(len(worker.heartbeats), 1) - h1 = worker.heartbeats[0] - worker.event(None, time(), time() - 10) - self.assertEqual(len(worker.heartbeats), 2) - self.assertEqual(worker.heartbeats[-1], h1) - - -class test_Task(AppCase): - - def test_equality(self): - self.assertEqual(Task(uuid='foo').uuid, 'foo') - self.assertEqual( - Task(uuid='foo'), Task(uuid='foo'), - ) - self.assertNotEqual( - Task(uuid='foo'), Task(uuid='bar'), - ) - self.assertEqual( - hash(Task(uuid='foo')), hash(Task(uuid='foo')), - ) - self.assertNotEqual( - hash(Task(uuid='foo')), hash(Task(uuid='bar')), - ) - - def test_info(self): - task = Task(uuid='abcdefg', - name='tasks.add', - args='(2, 2)', - kwargs='{}', - retries=2, - result=42, - eta=1, - runtime=0.0001, - expires=1, - foo=None, - exception=1, - received=time() - 10, - 
started=time() - 8, - exchange='celery', - routing_key='celery', - succeeded=time()) - self.assertEqual(sorted(list(task._info_fields)), - sorted(task.info().keys())) - - self.assertEqual(sorted(list(task._info_fields + ('received', ))), - sorted(task.info(extra=('received', )))) - - self.assertEqual(sorted(['args', 'kwargs']), - sorted(task.info(['args', 'kwargs']).keys())) - self.assertFalse(list(task.info('foo'))) - - def test_ready(self): - task = Task(uuid='abcdefg', - name='tasks.add') - task.event('received', time(), time()) - self.assertFalse(task.ready) - task.event('succeeded', time(), time()) - self.assertTrue(task.ready) - - def test_sent(self): - task = Task(uuid='abcdefg', - name='tasks.add') - task.event('sent', time(), time()) - self.assertEqual(task.state, states.PENDING) - - def test_merge(self): - task = Task() - task.event('failed', time(), time()) - task.event('started', time(), time()) - task.event('received', time(), time(), { - 'name': 'tasks.add', 'args': (2, 2), - }) - self.assertEqual(task.state, states.FAILURE) - self.assertEqual(task.name, 'tasks.add') - self.assertTupleEqual(task.args, (2, 2)) - task.event('retried', time(), time()) - self.assertEqual(task.state, states.RETRY) - - def test_repr(self): - self.assertTrue(repr(Task(uuid='xxx', name='tasks.add'))) - - -class test_State(AppCase): - - def test_repr(self): - self.assertTrue(repr(State())) - - def test_pickleable(self): - self.assertTrue(pickle.loads(pickle.dumps(State()))) - - def test_task_logical_clock_ordering(self): - state = State() - r = ev_logical_clock_ordering(state) - tA, tB, tC = r.uids - r.play() - now = list(state.tasks_by_time()) - self.assertEqual(now[0][0], tA) - self.assertEqual(now[1][0], tC) - self.assertEqual(now[2][0], tB) - for _ in range(1000): - shuffle(r.uids) - tA, tB, tC = r.uids - r.rewind_with_offset(r.current_clock + 1, r.uids) - r.play() - now = list(state.tasks_by_time()) - self.assertEqual(now[0][0], tA) - self.assertEqual(now[1][0], tC) - self.assertEqual(now[2][0], tB) - - def test_worker_online_offline(self): - r = ev_worker_online_offline(State()) - next(r) - self.assertTrue(r.state.alive_workers()) - self.assertTrue(r.state.workers['utest1'].alive) - r.play() - self.assertFalse(r.state.alive_workers()) - self.assertFalse(r.state.workers['utest1'].alive) - - def test_itertasks(self): - s = State() - s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} - self.assertEqual(len(list(s.itertasks(limit=2))), 2) - - def test_worker_heartbeat_expire(self): - r = ev_worker_heartbeats(State()) - next(r) - self.assertFalse(r.state.alive_workers()) - self.assertFalse(r.state.workers['utest1'].alive) - r.play() - self.assertTrue(r.state.alive_workers()) - self.assertTrue(r.state.workers['utest1'].alive) - - def test_task_states(self): - r = ev_task_states(State()) - - # RECEIVED - next(r) - self.assertTrue(r.tid in r.state.tasks) - task = r.state.tasks[r.tid] - self.assertEqual(task.state, states.RECEIVED) - self.assertTrue(task.received) - self.assertEqual(task.timestamp, task.received) - self.assertEqual(task.worker.hostname, 'utest1') - - # STARTED - next(r) - self.assertTrue(r.state.workers['utest1'].alive, - 'any task event adds worker heartbeat') - self.assertEqual(task.state, states.STARTED) - self.assertTrue(task.started) - self.assertEqual(task.timestamp, task.started) - self.assertEqual(task.worker.hostname, 'utest1') - - # REVOKED - next(r) - self.assertEqual(task.state, states.REVOKED) - self.assertTrue(task.revoked) - self.assertEqual(task.timestamp, task.revoked) - 
self.assertEqual(task.worker.hostname, 'utest1') - - # RETRY - next(r) - self.assertEqual(task.state, states.RETRY) - self.assertTrue(task.retried) - self.assertEqual(task.timestamp, task.retried) - self.assertEqual(task.worker.hostname, 'utest1') - self.assertEqual(task.exception, "KeyError('bar')") - self.assertEqual(task.traceback, 'line 2 at main') - - # FAILURE - next(r) - self.assertEqual(task.state, states.FAILURE) - self.assertTrue(task.failed) - self.assertEqual(task.timestamp, task.failed) - self.assertEqual(task.worker.hostname, 'utest1') - self.assertEqual(task.exception, "KeyError('foo')") - self.assertEqual(task.traceback, 'line 1 at main') - - # SUCCESS - next(r) - self.assertEqual(task.state, states.SUCCESS) - self.assertTrue(task.succeeded) - self.assertEqual(task.timestamp, task.succeeded) - self.assertEqual(task.worker.hostname, 'utest1') - self.assertEqual(task.result, '4') - self.assertEqual(task.runtime, 0.1234) - - def assertStateEmpty(self, state): - self.assertFalse(state.tasks) - self.assertFalse(state.workers) - self.assertFalse(state.event_count) - self.assertFalse(state.task_count) - - def assertState(self, state): - self.assertTrue(state.tasks) - self.assertTrue(state.workers) - self.assertTrue(state.event_count) - self.assertTrue(state.task_count) - - def test_freeze_while(self): - s = State() - r = ev_snapshot(s) - r.play() - - def work(): - pass - - s.freeze_while(work, clear_after=True) - self.assertFalse(s.event_count) - - s2 = State() - r = ev_snapshot(s2) - r.play() - s2.freeze_while(work, clear_after=False) - self.assertTrue(s2.event_count) - - def test_clear_tasks(self): - s = State() - r = ev_snapshot(s) - r.play() - self.assertTrue(s.tasks) - s.clear_tasks(ready=False) - self.assertFalse(s.tasks) - - def test_clear(self): - r = ev_snapshot(State()) - r.play() - self.assertTrue(r.state.event_count) - self.assertTrue(r.state.workers) - self.assertTrue(r.state.tasks) - self.assertTrue(r.state.task_count) - - r.state.clear() - self.assertFalse(r.state.event_count) - self.assertFalse(r.state.workers) - self.assertTrue(r.state.tasks) - self.assertFalse(r.state.task_count) - - r.state.clear(False) - self.assertFalse(r.state.tasks) - - def test_task_types(self): - r = ev_snapshot(State()) - r.play() - self.assertEqual(sorted(r.state.task_types()), ['task1', 'task2']) - - def test_tasks_by_timestamp(self): - r = ev_snapshot(State()) - r.play() - self.assertEqual(len(list(r.state.tasks_by_timestamp())), 20) - - def test_tasks_by_type(self): - r = ev_snapshot(State()) - r.play() - self.assertEqual(len(list(r.state.tasks_by_type('task1'))), 10) - self.assertEqual(len(list(r.state.tasks_by_type('task2'))), 10) - - def test_alive_workers(self): - r = ev_snapshot(State()) - r.play() - self.assertEqual(len(r.state.alive_workers()), 3) - - def test_tasks_by_worker(self): - r = ev_snapshot(State()) - r.play() - self.assertEqual(len(list(r.state.tasks_by_worker('utest1'))), 10) - self.assertEqual(len(list(r.state.tasks_by_worker('utest2'))), 10) - - def test_survives_unknown_worker_event(self): - s = State() - s.event({ - 'type': 'worker-unknown-event-xxx', - 'foo': 'bar', - }) - s.event({ - 'type': 'worker-unknown-event-xxx', - 'hostname': 'xxx', - 'foo': 'bar', - }) - - def test_survives_unknown_worker_leaving(self): - s = State(on_node_leave=Mock(name='on_node_leave')) - (worker, created), subject = s.event({ - 'type': 'worker-offline', - 'hostname': 'unknown@vandelay.com', - 'timestamp': time(), - 'local_received': time(), - 'clock': 301030134894833, - }) - 
self.assertEqual(worker, Worker('unknown@vandelay.com')) - self.assertFalse(created) - self.assertEqual(subject, 'offline') - self.assertNotIn('unknown@vandelay.com', s.workers) - s.on_node_leave.assert_called_with(worker) - - def test_on_node_join_callback(self): - s = State(on_node_join=Mock(name='on_node_join')) - (worker, created), subject = s.event({ - 'type': 'worker-online', - 'hostname': 'george@vandelay.com', - 'timestamp': time(), - 'local_received': time(), - 'clock': 34314, - }) - self.assertTrue(worker) - self.assertTrue(created) - self.assertEqual(subject, 'online') - self.assertIn('george@vandelay.com', s.workers) - s.on_node_join.assert_called_with(worker) - - def test_survives_unknown_task_event(self): - s = State() - s.event( - { - 'type': 'task-unknown-event-xxx', - 'foo': 'bar', - 'uuid': 'x', - 'hostname': 'y', - 'timestamp': time(), - 'local_received': time(), - 'clock': 0, - }, - ) - - def test_limits_maxtasks(self): - s = State(max_tasks_in_memory=1) - s.heap_multiplier = 2 - s.event({ - 'type': 'task-unknown-event-xxx', - 'foo': 'bar', - 'uuid': 'x', - 'hostname': 'y', - 'clock': 3, - 'timestamp': time(), - 'local_received': time(), - }) - s.event({ - 'type': 'task-unknown-event-xxx', - 'foo': 'bar', - 'uuid': 'y', - 'hostname': 'y', - 'clock': 4, - 'timestamp': time(), - 'local_received': time(), - }) - s.event({ - 'type': 'task-unknown-event-xxx', - 'foo': 'bar', - 'uuid': 'z', - 'hostname': 'y', - 'clock': 5, - 'timestamp': time(), - 'local_received': time(), - }) - self.assertEqual(len(s._taskheap), 2) - self.assertEqual(s._taskheap[0].clock, 4) - self.assertEqual(s._taskheap[1].clock, 5) - - s._taskheap.append(s._taskheap[0]) - self.assertTrue(list(s.tasks_by_time())) - - def test_callback(self): - scratch = {} - - def callback(state, event): - scratch['recv'] = True - - s = State(callback=callback) - s.event({'type': 'worker-online'}) - self.assertTrue(scratch.get('recv')) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py deleted file mode 100644 index 94b755e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py +++ /dev/null @@ -1,301 +0,0 @@ -from __future__ import absolute_import - -import os - -from contextlib import contextmanager - -from celery.fixups.django import ( - _maybe_close_fd, - fixup, - DjangoFixup, - DjangoWorkerFixup, -) - -from celery.tests.case import ( - AppCase, Mock, patch, patch_many, patch_modules, mask_modules, -) - - -class FixupCase(AppCase): - Fixup = None - - @contextmanager - def fixup_context(self, app): - with patch('celery.fixups.django.DjangoWorkerFixup.validate_models'): - with patch('celery.fixups.django.symbol_by_name') as symbyname: - with patch('celery.fixups.django.import_module') as impmod: - f = self.Fixup(app) - yield f, impmod, symbyname - - -class test_DjangoFixup(FixupCase): - Fixup = DjangoFixup - - def test_fixup(self): - with patch('celery.fixups.django.DjangoFixup') as Fixup: - with patch.dict(os.environ, DJANGO_SETTINGS_MODULE=''): - fixup(self.app) - self.assertFalse(Fixup.called) - with patch.dict(os.environ, DJANGO_SETTINGS_MODULE='settings'): - with mask_modules('django'): - with self.assertWarnsRegex(UserWarning, 'but Django is'): - fixup(self.app) - self.assertFalse(Fixup.called) - with patch_modules('django'): - fixup(self.app) - self.assertTrue(Fixup.called) - - def test_maybe_close_fd(self): - with patch('os.close'): - _maybe_close_fd(Mock()) - 
_maybe_close_fd(object()) - - def test_init(self): - with self.fixup_context(self.app) as (f, importmod, sym): - self.assertTrue(f) - - def se(name): - if name == 'django.utils.timezone:now': - raise ImportError() - return Mock() - sym.side_effect = se - self.assertTrue(self.Fixup(self.app)._now) - - def test_install(self): - self.app.loader = Mock() - with self.fixup_context(self.app) as (f, _, _): - with patch_many('os.getcwd', 'sys.path', - 'celery.fixups.django.signals') as (cw, p, sigs): - cw.return_value = '/opt/vandelay' - f.install() - sigs.worker_init.connect.assert_called_with(f.on_worker_init) - self.assertEqual(self.app.loader.now, f.now) - self.assertEqual(self.app.loader.mail_admins, f.mail_admins) - p.append.assert_called_with('/opt/vandelay') - - def test_now(self): - with self.fixup_context(self.app) as (f, _, _): - self.assertTrue(f.now(utc=True)) - self.assertFalse(f._now.called) - self.assertTrue(f.now(utc=False)) - self.assertTrue(f._now.called) - - def test_mail_admins(self): - with self.fixup_context(self.app) as (f, _, _): - f.mail_admins('sub', 'body', True) - f._mail_admins.assert_called_with( - 'sub', 'body', fail_silently=True, - ) - - def test_on_worker_init(self): - with self.fixup_context(self.app) as (f, _, _): - with patch('celery.fixups.django.DjangoWorkerFixup') as DWF: - f.on_worker_init() - DWF.assert_called_with(f.app) - DWF.return_value.install.assert_called_with() - self.assertIs(f._worker_fixup, DWF.return_value) - - -class test_DjangoWorkerFixup(FixupCase): - Fixup = DjangoWorkerFixup - - def test_init(self): - with self.fixup_context(self.app) as (f, importmod, sym): - self.assertTrue(f) - - def se(name): - if name == 'django.db:close_old_connections': - raise ImportError() - return Mock() - sym.side_effect = se - self.assertIsNone(self.Fixup(self.app)._close_old_connections) - - def test_install(self): - self.app.conf = {'CELERY_DB_REUSE_MAX': None} - self.app.loader = Mock() - with self.fixup_context(self.app) as (f, _, _): - with patch_many('celery.fixups.django.signals') as (sigs, ): - f.install() - sigs.beat_embedded_init.connect.assert_called_with( - f.close_database, - ) - sigs.worker_ready.connect.assert_called_with(f.on_worker_ready) - sigs.task_prerun.connect.assert_called_with(f.on_task_prerun) - sigs.task_postrun.connect.assert_called_with(f.on_task_postrun) - sigs.worker_process_init.connect.assert_called_with( - f.on_worker_process_init, - ) - - def test_on_worker_process_init(self): - with self.fixup_context(self.app) as (f, _, _): - with patch('celery.fixups.django._maybe_close_fd') as mcf: - _all = f._db.connections.all = Mock() - conns = _all.return_value = [ - Mock(), Mock(), - ] - conns[0].connection = None - with patch.object(f, 'close_cache'): - with patch.object(f, '_close_database'): - f.on_worker_process_init() - mcf.assert_called_with(conns[1].connection) - f.close_cache.assert_called_with() - f._close_database.assert_called_with() - - mcf.reset_mock() - _all.side_effect = AttributeError() - f.on_worker_process_init() - mcf.assert_called_with(f._db.connection.connection) - f._db.connection = None - f.on_worker_process_init() - - def test_on_task_prerun(self): - task = Mock() - with self.fixup_context(self.app) as (f, _, _): - task.request.is_eager = False - with patch.object(f, 'close_database'): - f.on_task_prerun(task) - f.close_database.assert_called_with() - - task.request.is_eager = True - with patch.object(f, 'close_database'): - f.on_task_prerun(task) - self.assertFalse(f.close_database.called) - - def 
test_on_task_postrun(self): - task = Mock() - with self.fixup_context(self.app) as (f, _, _): - with patch.object(f, 'close_cache'): - task.request.is_eager = False - with patch.object(f, 'close_database'): - f.on_task_postrun(task) - self.assertTrue(f.close_database.called) - self.assertTrue(f.close_cache.called) - - # when a task is eager, do not close connections - with patch.object(f, 'close_cache'): - task.request.is_eager = True - with patch.object(f, 'close_database'): - f.on_task_postrun(task) - self.assertFalse(f.close_database.called) - self.assertFalse(f.close_cache.called) - - def test_close_database(self): - with self.fixup_context(self.app) as (f, _, _): - f._close_old_connections = Mock() - f.close_database() - f._close_old_connections.assert_called_with() - f._close_old_connections = None - with patch.object(f, '_close_database') as _close: - f.db_reuse_max = None - f.close_database() - _close.assert_called_with() - _close.reset_mock() - - f.db_reuse_max = 10 - f._db_recycles = 3 - f.close_database() - self.assertFalse(_close.called) - self.assertEqual(f._db_recycles, 4) - _close.reset_mock() - - f._db_recycles = 20 - f.close_database() - _close.assert_called_with() - self.assertEqual(f._db_recycles, 1) - - def test__close_database(self): - with self.fixup_context(self.app) as (f, _, _): - conns = [Mock(), Mock(), Mock()] - conns[1].close.side_effect = KeyError('already closed') - f.database_errors = (KeyError, ) - - f._db.connections = Mock() # ConnectionHandler - f._db.connections.all.side_effect = lambda: conns - - f._close_database() - conns[0].close.assert_called_with() - conns[1].close.assert_called_with() - conns[2].close.assert_called_with() - - conns[1].close.side_effect = KeyError('omg') - with self.assertRaises(KeyError): - f._close_database() - - class Object(object): - pass - o = Object() - o.close_connection = Mock() - f._db = o - f._close_database() - o.close_connection.assert_called_with() - - def test_close_cache(self): - with self.fixup_context(self.app) as (f, _, _): - f.close_cache() - f._cache.cache.close.assert_called_with() - f._cache.cache.close.side_effect = TypeError() - f.close_cache() - - def test_on_worker_ready(self): - with self.fixup_context(self.app) as (f, _, _): - f._settings.DEBUG = False - f.on_worker_ready() - with self.assertWarnsRegex(UserWarning, r'leads to a memory leak'): - f._settings.DEBUG = True - f.on_worker_ready() - - def test_mysql_errors(self): - with patch_modules('MySQLdb'): - import MySQLdb as mod - mod.DatabaseError = Mock() - mod.InterfaceError = Mock() - mod.OperationalError = Mock() - with self.fixup_context(self.app) as (f, _, _): - self.assertIn(mod.DatabaseError, f.database_errors) - self.assertIn(mod.InterfaceError, f.database_errors) - self.assertIn(mod.OperationalError, f.database_errors) - with mask_modules('MySQLdb'): - with self.fixup_context(self.app): - pass - - def test_pg_errors(self): - with patch_modules('psycopg2'): - import psycopg2 as mod - mod.DatabaseError = Mock() - mod.InterfaceError = Mock() - mod.OperationalError = Mock() - with self.fixup_context(self.app) as (f, _, _): - self.assertIn(mod.DatabaseError, f.database_errors) - self.assertIn(mod.InterfaceError, f.database_errors) - self.assertIn(mod.OperationalError, f.database_errors) - with mask_modules('psycopg2'): - with self.fixup_context(self.app): - pass - - def test_sqlite_errors(self): - with patch_modules('sqlite3'): - import sqlite3 as mod - mod.DatabaseError = Mock() - mod.InterfaceError = Mock() - mod.OperationalError = Mock() - 
with self.fixup_context(self.app) as (f, _, _): - self.assertIn(mod.DatabaseError, f.database_errors) - self.assertIn(mod.InterfaceError, f.database_errors) - self.assertIn(mod.OperationalError, f.database_errors) - with mask_modules('sqlite3'): - with self.fixup_context(self.app): - pass - - def test_oracle_errors(self): - with patch_modules('cx_Oracle'): - import cx_Oracle as mod - mod.DatabaseError = Mock() - mod.InterfaceError = Mock() - mod.OperationalError = Mock() - with self.fixup_context(self.app) as (f, _, _): - self.assertIn(mod.DatabaseError, f.database_errors) - self.assertIn(mod.InterfaceError, f.database_errors) - self.assertIn(mod.OperationalError, f.database_errors) - with mask_modules('cx_Oracle'): - with self.fixup_context(self.app): - pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py deleted file mode 100644 index 298c684..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py +++ /dev/null @@ -1,178 +0,0 @@ -from __future__ import absolute_import - -import atexit -import logging -import os -import signal -import socket -import sys -import traceback - -from itertools import count -from time import time - -from celery import current_app -from celery.exceptions import TimeoutError -from celery.app.control import flatten_reply -from celery.utils.imports import qualname - -from celery.tests.case import Case - -HOSTNAME = socket.gethostname() - - -def say(msg): - sys.stderr.write('%s\n' % msg) - - -def try_while(fun, reason='Timed out', timeout=10, interval=0.5): - time_start = time() - for iterations in count(0): - if time() - time_start >= timeout: - raise TimeoutError() - ret = fun() - if ret: - return ret - - -class Worker(object): - started = False - worker_ids = count(1) - _shutdown_called = False - - def __init__(self, hostname, loglevel='error', app=None): - self.hostname = hostname - self.loglevel = loglevel - self.app = app or current_app._get_current_object() - - def start(self): - if not self.started: - self._fork_and_exec() - self.started = True - - def _fork_and_exec(self): - pid = os.fork() - if pid == 0: - self.app.worker_main(['worker', '--loglevel=INFO', - '-n', self.hostname, - '-P', 'solo']) - os._exit(0) - self.pid = pid - - def ping(self, *args, **kwargs): - return self.app.control.ping(*args, **kwargs) - - def is_alive(self, timeout=1): - r = self.ping(destination=[self.hostname], timeout=timeout) - return self.hostname in flatten_reply(r) - - def wait_until_started(self, timeout=10, interval=0.5): - try_while( - lambda: self.is_alive(interval), - "Worker won't start (after %s secs.)" % timeout, - interval=interval, timeout=timeout, - ) - say('--WORKER %s IS ONLINE--' % self.hostname) - - def ensure_shutdown(self, timeout=10, interval=0.5): - os.kill(self.pid, signal.SIGTERM) - try_while( - lambda: not self.is_alive(interval), - "Worker won't shutdown (after %s secs.)" % timeout, - timeout=10, interval=0.5, - ) - say('--WORKER %s IS SHUTDOWN--' % self.hostname) - self._shutdown_called = True - - def ensure_started(self): - self.start() - self.wait_until_started() - - @classmethod - def managed(cls, hostname=None, caller=None): - hostname = hostname or socket.gethostname() - if caller: - hostname = '.'.join([qualname(caller), hostname]) - else: - hostname += str(next(cls.worker_ids())) - worker = cls(hostname) - worker.ensure_started() - stack = traceback.format_stack() - - @atexit.register - def 
_ensure_shutdown_once(): - if not worker._shutdown_called: - say('-- Found worker not stopped at shutdown: %s\n%s' % ( - worker.hostname, - '\n'.join(stack))) - worker.ensure_shutdown() - - return worker - - -class WorkerCase(Case): - hostname = HOSTNAME - worker = None - - @classmethod - def setUpClass(cls): - logging.getLogger('amqp').setLevel(logging.ERROR) - cls.worker = Worker.managed(cls.hostname, caller=cls) - - @classmethod - def tearDownClass(cls): - cls.worker.ensure_shutdown() - - def assertWorkerAlive(self, timeout=1): - self.assertTrue(self.worker.is_alive) - - def inspect(self, timeout=1): - return self.app.control.inspect([self.worker.hostname], - timeout=timeout) - - def my_response(self, response): - return flatten_reply(response)[self.worker.hostname] - - def is_accepted(self, task_id, interval=0.5): - active = self.inspect(timeout=interval).active() - if active: - for task in active[self.worker.hostname]: - if task['id'] == task_id: - return True - return False - - def is_reserved(self, task_id, interval=0.5): - reserved = self.inspect(timeout=interval).reserved() - if reserved: - for task in reserved[self.worker.hostname]: - if task['id'] == task_id: - return True - return False - - def is_scheduled(self, task_id, interval=0.5): - schedule = self.inspect(timeout=interval).scheduled() - if schedule: - for item in schedule[self.worker.hostname]: - if item['request']['id'] == task_id: - return True - return False - - def is_received(self, task_id, interval=0.5): - return (self.is_reserved(task_id, interval) or - self.is_scheduled(task_id, interval) or - self.is_accepted(task_id, interval)) - - def ensure_accepted(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_accepted(task_id, interval), - 'Task not accepted within timeout', - interval=interval, timeout=timeout) - - def ensure_received(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_received(task_id, interval), - 'Task not received within timeout', - interval=interval, timeout=timeout) - - def ensure_scheduled(self, task_id, interval=0.5, timeout=10): - return try_while(lambda: self.is_scheduled(task_id, interval), - 'Task not scheduled within timeout', - interval=interval, timeout=timeout) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py deleted file mode 100644 index 85479b4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import absolute_import - -import time - -from celery import task, signature - - -@task() -def add(x, y): - return x + y - - -@task() -def add_cb(x, y, callback=None): - result = x + y - if callback: - return signature(callback).apply_async((result, )) - return result - - -@task() -def sleeptask(i): - time.sleep(i) - return i diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py deleted file mode 100644 index 50b7f4c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import -""" -Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) 
- -Generated with `extra/security/get-cert.sh` - -""" -KEY1 = """-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 -dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp -vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB -AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX -0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf -6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM -s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt -XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 -PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu -fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp -UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv -BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 -xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR ------END RSA PRIVATE KEY-----""" - -KEY2 = """-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C -fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 -rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB -AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U -JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn -6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k -Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 -qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE -AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 -yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh -XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz -i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 -Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V ------END RSA PRIVATE KEY-----""" - -CERT1 = """-----BEGIN CERTIFICATE----- -MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV -UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN -BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 -Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT -AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP -MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl -Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ -CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 -//IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG -vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J -94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA -yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK -aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK ------END CERTIFICATE-----""" - -CERT2 = """-----BEGIN CERTIFICATE----- -MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti -/G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ -EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm -IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF -AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP -e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS -WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== ------END 
CERTIFICATE-----""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py deleted file mode 100644 index ba421a9..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import - -from celery.tests.case import AppCase, SkipTest - -import sys - - -class SecurityCase(AppCase): - - def setup(self): - if sys.version_info[0] == 3: - raise SkipTest('PyOpenSSL does not work on Python 3') - try: - from OpenSSL import crypto # noqa - except ImportError: - raise SkipTest('OpenSSL.crypto not installed') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py deleted file mode 100644 index 6e153bd..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import absolute_import - -from celery.exceptions import SecurityError -from celery.security.certificate import Certificate, CertStore, FSCertStore - -from . import CERT1, CERT2, KEY1 -from .case import SecurityCase - -from celery.tests.case import Mock, SkipTest, mock_open, patch - - -class test_Certificate(SecurityCase): - - def test_valid_certificate(self): - Certificate(CERT1) - Certificate(CERT2) - - def test_invalid_certificate(self): - self.assertRaises((SecurityError, TypeError), Certificate, None) - self.assertRaises(SecurityError, Certificate, '') - self.assertRaises(SecurityError, Certificate, 'foo') - self.assertRaises(SecurityError, Certificate, CERT1[:20] + CERT1[21:]) - self.assertRaises(SecurityError, Certificate, KEY1) - - def test_has_expired(self): - raise SkipTest('cert actually expired') - self.assertFalse(Certificate(CERT1).has_expired()) - - -class test_CertStore(SecurityCase): - - def test_itercerts(self): - cert1 = Certificate(CERT1) - cert2 = Certificate(CERT2) - certstore = CertStore() - for c in certstore.itercerts(): - self.assertTrue(False) - certstore.add_cert(cert1) - certstore.add_cert(cert2) - for c in certstore.itercerts(): - self.assertIn(c, (cert1, cert2)) - - def test_duplicate(self): - cert1 = Certificate(CERT1) - certstore = CertStore() - certstore.add_cert(cert1) - self.assertRaises(SecurityError, certstore.add_cert, cert1) - - -class test_FSCertStore(SecurityCase): - - @patch('os.path.isdir') - @patch('glob.glob') - @patch('celery.security.certificate.Certificate') - def test_init(self, Certificate, glob, isdir): - cert = Certificate.return_value = Mock() - cert.has_expired.return_value = False - isdir.return_value = True - glob.return_value = ['foo.cert'] - with mock_open(): - cert.get_id.return_value = 1 - x = FSCertStore('/var/certs') - self.assertIn(1, x._certs) - glob.assert_called_with('/var/certs/*') - - # they both end up with the same id - glob.return_value = ['foo.cert', 'bar.cert'] - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') - glob.return_value = ['foo.cert'] - - cert.has_expired.return_value = True - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') - - isdir.return_value = False - with self.assertRaises(SecurityError): - x = FSCertStore('/var/certs') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py deleted file mode 100644 index d8551b2..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import absolute_import - -from celery.exceptions import SecurityError -from celery.security.key import PrivateKey - -from . import CERT1, KEY1, KEY2 -from .case import SecurityCase - - -class test_PrivateKey(SecurityCase): - - def test_valid_private_key(self): - PrivateKey(KEY1) - PrivateKey(KEY2) - - def test_invalid_private_key(self): - self.assertRaises((SecurityError, TypeError), PrivateKey, None) - self.assertRaises(SecurityError, PrivateKey, '') - self.assertRaises(SecurityError, PrivateKey, 'foo') - self.assertRaises(SecurityError, PrivateKey, KEY1[:20] + KEY1[21:]) - self.assertRaises(SecurityError, PrivateKey, CERT1) - - def test_sign(self): - pkey = PrivateKey(KEY1) - pkey.sign('test', 'sha1') - self.assertRaises(ValueError, pkey.sign, 'test', 'unknown') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py deleted file mode 100644 index 227c65a..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) - -Generated with: - -.. code-block:: bash - - $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 - $ openssl req -new -key key1.key -out key1.csr -passin pass:test - $ cp key1.key key1.key.org - $ openssl rsa -in key1.key.org -out key1.key -passin pass:test - $ openssl x509 -req -days 365 -in key1.csr \ - -signkey key1.key -out cert1.crt - $ rm key1.key.org key1.csr - -""" -from __future__ import absolute_import - -from kombu.serialization import disable_insecure_serializers - -from celery.exceptions import ImproperlyConfigured, SecurityError -from celery.five import builtins -from celery.security.utils import reraise_errors -from kombu.serialization import registry - -from .case import SecurityCase - -from celery.tests.case import Mock, mock_open, patch - - -class test_security(SecurityCase): - - def teardown(self): - registry._disabled_content_types.clear() - - def test_disable_insecure_serializers(self): - try: - disabled = registry._disabled_content_types - self.assertTrue(disabled) - - disable_insecure_serializers( - ['application/json', 'application/x-python-serialize'], - ) - self.assertIn('application/x-yaml', disabled) - self.assertNotIn('application/json', disabled) - self.assertNotIn('application/x-python-serialize', disabled) - disabled.clear() - - disable_insecure_serializers(allowed=None) - self.assertIn('application/x-yaml', disabled) - self.assertIn('application/json', disabled) - self.assertIn('application/x-python-serialize', disabled) - finally: - disable_insecure_serializers(allowed=['json']) - - def test_setup_security(self): - disabled = registry._disabled_content_types - self.assertEqual(0, len(disabled)) - - self.app.conf.CELERY_TASK_SERIALIZER = 'json' - self.app.setup_security() - self.assertIn('application/x-python-serialize', disabled) - disabled.clear() - - @patch('celery.security.register_auth') - @patch('celery.security._disable_insecure_serializers') - def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): - calls = [0] - - def effect(*args): - try: - m = Mock() - m.read.return_value = 'B' if calls[0] else 'A' - return m - finally: - calls[0] += 1 - - self.app.conf.CELERY_TASK_SERIALIZER = 'auth' - with mock_open(side_effect=effect): - with 
patch('celery.security.registry') as registry: - store = Mock() - self.app.setup_security(['json'], key, cert, store) - dis.assert_called_with(['json']) - reg.assert_called_with('A', 'B', store, 'sha1', 'json') - registry._set_default_serializer.assert_called_with('auth') - - def test_security_conf(self): - self.app.conf.CELERY_TASK_SERIALIZER = 'auth' - with self.assertRaises(ImproperlyConfigured): - self.app.setup_security() - - _import = builtins.__import__ - - def import_hook(name, *args, **kwargs): - if name == 'OpenSSL': - raise ImportError - return _import(name, *args, **kwargs) - - builtins.__import__ = import_hook - with self.assertRaises(ImproperlyConfigured): - self.app.setup_security() - builtins.__import__ = _import - - def test_reraise_errors(self): - with self.assertRaises(SecurityError): - with reraise_errors(errors=(KeyError, )): - raise KeyError('foo') - with self.assertRaises(KeyError): - with reraise_errors(errors=(ValueError, )): - raise KeyError('bar') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py deleted file mode 100644 index 50bc4bf..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import absolute_import - -import os -import base64 - -from kombu.serialization import registry - -from celery.exceptions import SecurityError -from celery.security.serialization import SecureSerializer, register_auth -from celery.security.certificate import Certificate, CertStore -from celery.security.key import PrivateKey - -from . import CERT1, CERT2, KEY1, KEY2 -from .case import SecurityCase - - -class test_SecureSerializer(SecurityCase): - - def _get_s(self, key, cert, certs): - store = CertStore() - for c in certs: - store.add_cert(Certificate(c)) - return SecureSerializer(PrivateKey(key), Certificate(cert), store) - - def test_serialize(self): - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s.deserialize(s.serialize('foo')), 'foo') - - def test_deserialize(self): - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertRaises(SecurityError, s.deserialize, 'bad data') - - def test_unmatched_key_cert(self): - s = self._get_s(KEY1, CERT2, [CERT1, CERT2]) - self.assertRaises(SecurityError, - s.deserialize, s.serialize('foo')) - - def test_unknown_source(self): - s1 = self._get_s(KEY1, CERT1, [CERT2]) - s2 = self._get_s(KEY1, CERT1, []) - self.assertRaises(SecurityError, - s1.deserialize, s1.serialize('foo')) - self.assertRaises(SecurityError, - s2.deserialize, s2.serialize('foo')) - - def test_self_send(self): - s1 = self._get_s(KEY1, CERT1, [CERT1]) - s2 = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') - - def test_separate_ends(self): - s1 = self._get_s(KEY1, CERT1, [CERT2]) - s2 = self._get_s(KEY2, CERT2, [CERT1]) - self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') - - def test_register_auth(self): - register_auth(KEY1, CERT1, '') - self.assertIn('application/data', registry._decoders) - - def test_lots_of_sign(self): - for i in range(1000): - rdata = base64.urlsafe_b64encode(os.urandom(265)) - s = self._get_s(KEY1, CERT1, [CERT1]) - self.assertEqual(s.deserialize(s.serialize(rdata)), rdata) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py deleted file mode 100644 index e69de29..0000000 diff 
--git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py deleted file mode 100644 index 2508025..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py +++ /dev/null @@ -1,346 +0,0 @@ -from __future__ import absolute_import - -from celery.canvas import ( - Signature, - chain, - group, - chord, - signature, - xmap, - xstarmap, - chunks, - _maybe_group, - maybe_signature, -) -from celery.result import EagerResult - -from celery.tests.case import AppCase, Mock - -SIG = Signature({'task': 'TASK', - 'args': ('A1', ), - 'kwargs': {'K1': 'V1'}, - 'options': {'task_id': 'TASK_ID'}, - 'subtask_type': ''}) - - -class CanvasCase(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - @self.app.task(shared=False) - def mul(x, y): - return x * y - self.mul = mul - - @self.app.task(shared=False) - def div(x, y): - return x / y - self.div = div - - -class test_Signature(CanvasCase): - - def test_getitem_property_class(self): - self.assertTrue(Signature.task) - self.assertTrue(Signature.args) - self.assertTrue(Signature.kwargs) - self.assertTrue(Signature.options) - self.assertTrue(Signature.subtask_type) - - def test_getitem_property(self): - self.assertEqual(SIG.task, 'TASK') - self.assertEqual(SIG.args, ('A1', )) - self.assertEqual(SIG.kwargs, {'K1': 'V1'}) - self.assertEqual(SIG.options, {'task_id': 'TASK_ID'}) - self.assertEqual(SIG.subtask_type, '') - - def test_link_on_scalar(self): - x = Signature('TASK', link=Signature('B')) - self.assertTrue(x.options['link']) - x.link(Signature('C')) - self.assertIsInstance(x.options['link'], list) - self.assertIn(Signature('B'), x.options['link']) - self.assertIn(Signature('C'), x.options['link']) - - def test_replace(self): - x = Signature('TASK', ('A'), {}) - self.assertTupleEqual(x.replace(args=('B', )).args, ('B', )) - self.assertDictEqual( - x.replace(kwargs={'FOO': 'BAR'}).kwargs, - {'FOO': 'BAR'}, - ) - self.assertDictEqual( - x.replace(options={'task_id': '123'}).options, - {'task_id': '123'}, - ) - - def test_set(self): - self.assertDictEqual( - Signature('TASK', x=1).set(task_id='2').options, - {'x': 1, 'task_id': '2'}, - ) - - def test_link(self): - x = signature(SIG) - x.link(SIG) - x.link(SIG) - self.assertIn(SIG, x.options['link']) - self.assertEqual(len(x.options['link']), 1) - - def test_link_error(self): - x = signature(SIG) - x.link_error(SIG) - x.link_error(SIG) - self.assertIn(SIG, x.options['link_error']) - self.assertEqual(len(x.options['link_error']), 1) - - def test_flatten_links(self): - tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)] - tasks[0].link(tasks[1]) - tasks[1].link(tasks[2]) - self.assertEqual(tasks[0].flatten_links(), tasks) - - def test_OR(self): - x = self.add.s(2, 2) | self.mul.s(4) - self.assertIsInstance(x, chain) - y = self.add.s(4, 4) | self.div.s(2) - z = x | y - self.assertIsInstance(y, chain) - self.assertIsInstance(z, chain) - self.assertEqual(len(z.tasks), 4) - with self.assertRaises(TypeError): - x | 10 - ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8)) - self.assertIsInstance(ax, chain) - self.assertEqual(len(ax.tasks), 3, 'consolidates chain to chain') - - def test_INVERT(self): - x = self.add.s(2, 2) - 
x.apply_async = Mock() - x.apply_async.return_value = Mock() - x.apply_async.return_value.get = Mock() - x.apply_async.return_value.get.return_value = 4 - self.assertEqual(~x, 4) - self.assertTrue(x.apply_async.called) - - def test_merge_immutable(self): - x = self.add.si(2, 2, foo=1) - args, kwargs, options = x._merge((4, ), {'bar': 2}, {'task_id': 3}) - self.assertTupleEqual(args, (2, 2)) - self.assertDictEqual(kwargs, {'foo': 1}) - self.assertDictEqual(options, {'task_id': 3}) - - def test_set_immutable(self): - x = self.add.s(2, 2) - self.assertFalse(x.immutable) - x.set(immutable=True) - self.assertTrue(x.immutable) - x.set(immutable=False) - self.assertFalse(x.immutable) - - def test_election(self): - x = self.add.s(2, 2) - x.freeze('foo') - x.type.app.control = Mock() - r = x.election() - self.assertTrue(x.type.app.control.election.called) - self.assertEqual(r.id, 'foo') - - def test_AsyncResult_when_not_registered(self): - s = signature('xxx.not.registered', app=self.app) - self.assertTrue(s.AsyncResult) - - def test_apply_async_when_not_registered(self): - s = signature('xxx.not.registered', app=self.app) - self.assertTrue(s._apply_async) - - -class test_xmap_xstarmap(CanvasCase): - - def test_apply(self): - for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]: - args = [(i, i) for i in range(10)] - s = getattr(self.add, attr)(args) - s.type = Mock() - - s.apply_async(foo=1) - s.type.apply_async.assert_called_with( - (), {'task': self.add.s(), 'it': args}, foo=1, - ) - - self.assertEqual(type.from_dict(dict(s)), s) - self.assertTrue(repr(s)) - - -class test_chunks(CanvasCase): - - def test_chunks(self): - x = self.add.chunks(range(100), 10) - self.assertEqual( - dict(chunks.from_dict(dict(x), app=self.app)), dict(x), - ) - - self.assertTrue(x.group()) - self.assertEqual(len(x.group().tasks), 10) - - x.group = Mock() - gr = x.group.return_value = Mock() - - x.apply_async() - gr.apply_async.assert_called_with((), {}) - - x() - gr.assert_called_with() - - self.app.conf.CELERY_ALWAYS_EAGER = True - chunks.apply_chunks(app=self.app, **x['kwargs']) - - -class test_chain(CanvasCase): - - def test_repr(self): - x = self.add.s(2, 2) | self.add.s(2) - self.assertEqual( - repr(x), '%s(2, 2) | %s(2)' % (self.add.name, self.add.name), - ) - - def test_reverse(self): - x = self.add.s(2, 2) | self.add.s(2) - self.assertIsInstance(signature(x), chain) - self.assertIsInstance(signature(dict(x)), chain) - - def test_always_eager(self): - self.app.conf.CELERY_ALWAYS_EAGER = True - self.assertEqual(~(self.add.s(4, 4) | self.add.s(8)), 16) - - def test_apply(self): - x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10)) - res = x.apply() - self.assertIsInstance(res, EagerResult) - self.assertEqual(res.get(), 26) - - self.assertEqual(res.parent.get(), 16) - self.assertEqual(res.parent.parent.get(), 8) - self.assertIsNone(res.parent.parent.parent) - - def test_empty_chain_returns_none(self): - self.assertIsNone(chain(app=self.app)()) - self.assertIsNone(chain(app=self.app).apply_async()) - - def test_call_no_tasks(self): - x = chain() - self.assertFalse(x()) - - def test_call_with_tasks(self): - x = self.add.s(2, 2) | self.add.s(4) - x.apply_async = Mock() - x(2, 2, foo=1) - x.apply_async.assert_called_with((2, 2), {'foo': 1}) - - def test_from_dict_no_args__with_args(self): - x = dict(self.add.s(2, 2) | self.add.s(4)) - x['args'] = None - self.assertIsInstance(chain.from_dict(x), chain) - x['args'] = (2, ) - self.assertIsInstance(chain.from_dict(x), chain) - - def 
test_accepts_generator_argument(self): - x = chain(self.add.s(i) for i in range(10)) - self.assertTrue(x.tasks[0].type, self.add) - self.assertTrue(x.type) - - -class test_group(CanvasCase): - - def test_repr(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - self.assertEqual(repr(x), repr(x.tasks)) - - def test_reverse(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - self.assertIsInstance(signature(x), group) - self.assertIsInstance(signature(dict(x)), group) - - def test_maybe_group_sig(self): - self.assertListEqual( - _maybe_group(self.add.s(2, 2)), [self.add.s(2, 2)], - ) - - def test_from_dict(self): - x = group([self.add.s(2, 2), self.add.s(4, 4)]) - x['args'] = (2, 2) - self.assertTrue(group.from_dict(dict(x))) - x['args'] = None - self.assertTrue(group.from_dict(dict(x))) - - def test_call_empty_group(self): - x = group(app=self.app) - self.assertFalse(len(x())) - x.delay() - x.apply_async() - x() - - def test_skew(self): - g = group([self.add.s(i, i) for i in range(10)]) - g.skew(start=1, stop=10, step=1) - for i, task in enumerate(g.tasks): - self.assertEqual(task.options['countdown'], i + 1) - - def test_iter(self): - g = group([self.add.s(i, i) for i in range(10)]) - self.assertListEqual(list(iter(g)), g.tasks) - - -class test_chord(CanvasCase): - - def test_reverse(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - self.assertIsInstance(signature(x), chord) - self.assertIsInstance(signature(dict(x)), chord) - - def test_clone_clones_body(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - y = x.clone() - self.assertIsNot(x.kwargs['body'], y.kwargs['body']) - y.kwargs.pop('body') - z = y.clone() - self.assertIsNone(z.kwargs.get('body')) - - def test_links_to_body(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - x.link(self.div.s(2)) - self.assertFalse(x.options.get('link')) - self.assertTrue(x.kwargs['body'].options['link']) - - x.link_error(self.div.s(2)) - self.assertFalse(x.options.get('link_error')) - self.assertTrue(x.kwargs['body'].options['link_error']) - - self.assertTrue(x.tasks) - self.assertTrue(x.body) - - def test_repr(self): - x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) - self.assertTrue(repr(x)) - x.kwargs['body'] = None - self.assertIn('without body', repr(x)) - - -class test_maybe_signature(CanvasCase): - - def test_is_None(self): - self.assertIsNone(maybe_signature(None, app=self.app)) - - def test_is_dict(self): - self.assertIsInstance( - maybe_signature(dict(self.add.s()), app=self.app), Signature, - ) - - def test_when_sig(self): - s = self.add.s() - self.assertIs(maybe_signature(s, app=self.app), s) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py deleted file mode 100644 index dcc3304..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py +++ /dev/null @@ -1,235 +0,0 @@ -from __future__ import absolute_import - -from contextlib import contextmanager - -from celery import group -from celery import canvas -from celery import result -from celery.exceptions import ChordError, Retry -from celery.five import range -from celery.result import AsyncResult, GroupResult, EagerResult -from celery.tests.case import AppCase, Mock - - -def passthru(x): - return x - - -class ChordCase(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - -class 
TSR(GroupResult): - is_ready = True - value = None - - def ready(self): - return self.is_ready - - def join(self, propagate=True, **kwargs): - if propagate: - for value in self.value: - if isinstance(value, Exception): - raise value - return self.value - join_native = join - - def _failed_join_report(self): - for value in self.value: - if isinstance(value, Exception): - yield EagerResult('some_id', value, 'FAILURE') - - -class TSRNoReport(TSR): - - def _failed_join_report(self): - return iter([]) - - -@contextmanager -def patch_unlock_retry(app): - unlock = app.tasks['celery.chord_unlock'] - retry = Mock() - retry.return_value = Retry() - prev, unlock.retry = unlock.retry, retry - try: - yield unlock, retry - finally: - unlock.retry = prev - - -class test_unlock_chord_task(ChordCase): - - def test_unlock_ready(self): - - class AlwaysReady(TSR): - is_ready = True - value = [2, 4, 8, 6] - - with self._chord_context(AlwaysReady) as (cb, retry, _): - cb.type.apply_async.assert_called_with( - ([2, 4, 8, 6], ), {}, task_id=cb.id, - ) - # did not retry - self.assertFalse(retry.call_count) - - def test_callback_fails(self): - - class AlwaysReady(TSR): - is_ready = True - value = [2, 4, 8, 6] - - def setup(callback): - callback.apply_async.side_effect = IOError() - - with self._chord_context(AlwaysReady, setup) as (cb, retry, fail): - self.assertTrue(fail.called) - self.assertEqual( - fail.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail.call_args[1]['exc'], ChordError, - ) - - def test_unlock_ready_failed(self): - - class Failed(TSR): - is_ready = True - value = [2, KeyError('foo'), 8, 6] - - with self._chord_context(Failed) as (cb, retry, fail_current): - self.assertFalse(cb.type.apply_async.called) - # did not retry - self.assertFalse(retry.call_count) - self.assertTrue(fail_current.called) - self.assertEqual( - fail_current.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail_current.call_args[1]['exc'], ChordError, - ) - self.assertIn('some_id', str(fail_current.call_args[1]['exc'])) - - def test_unlock_ready_failed_no_culprit(self): - class Failed(TSRNoReport): - is_ready = True - value = [2, KeyError('foo'), 8, 6] - - with self._chord_context(Failed) as (cb, retry, fail_current): - self.assertTrue(fail_current.called) - self.assertEqual( - fail_current.call_args[0][0], cb.id, - ) - self.assertIsInstance( - fail_current.call_args[1]['exc'], ChordError, - ) - - @contextmanager - def _chord_context(self, ResultCls, setup=None, **kwargs): - @self.app.task(shared=False) - def callback(*args, **kwargs): - pass - self.app.finalize() - - pts, result.GroupResult = result.GroupResult, ResultCls - callback.apply_async = Mock() - callback_s = callback.s() - callback_s.id = 'callback_id' - fail_current = self.app.backend.fail_from_current_stack = Mock() - try: - with patch_unlock_retry(self.app) as (unlock, retry): - subtask, canvas.maybe_signature = ( - canvas.maybe_signature, passthru, - ) - if setup: - setup(callback) - try: - assert self.app.tasks['celery.chord_unlock'] is unlock - try: - unlock( - 'group_id', callback_s, - result=[ - self.app.AsyncResult(r) for r in ['1', 2, 3] - ], - GroupResult=ResultCls, **kwargs - ) - except Retry: - pass - finally: - canvas.maybe_signature = subtask - yield callback_s, retry, fail_current - finally: - result.GroupResult = pts - - def test_when_not_ready(self): - class NeverReady(TSR): - is_ready = False - - with self._chord_context(NeverReady, interval=10, max_retries=30) \ - as (cb, retry, _): - self.assertFalse(cb.type.apply_async.called) - 
# did retry - retry.assert_called_with(countdown=10, max_retries=30) - - def test_is_in_registry(self): - self.assertIn('celery.chord_unlock', self.app.tasks) - - -class test_chord(ChordCase): - - def test_eager(self): - from celery import chord - - @self.app.task(shared=False) - def addX(x, y): - return x + y - - @self.app.task(shared=False) - def sumX(n): - return sum(n) - - self.app.conf.CELERY_ALWAYS_EAGER = True - x = chord(addX.s(i, i) for i in range(10)) - body = sumX.s() - result = x(body) - self.assertEqual(result.get(), sum(i + i for i in range(10))) - - def test_apply(self): - self.app.conf.CELERY_ALWAYS_EAGER = False - from celery import chord - - m = Mock() - m.app.conf.CELERY_ALWAYS_EAGER = False - m.AsyncResult = AsyncResult - prev, chord._type = chord._type, m - try: - x = chord(self.add.s(i, i) for i in range(10)) - body = self.add.s(2) - result = x(body) - self.assertTrue(result.id) - # does not modify original subtask - with self.assertRaises(KeyError): - body.options['task_id'] - self.assertTrue(chord._type.called) - finally: - chord._type = prev - - -class test_Chord_task(ChordCase): - - def test_run(self): - self.app.backend = Mock() - self.app.backend.cleanup = Mock() - self.app.backend.cleanup.__name__ = 'cleanup' - Chord = self.app.tasks['celery.chord'] - - body = dict() - Chord(group(self.add.subtask((i, i)) for i in range(5)), body) - Chord([self.add.subtask((j, j)) for j in range(5)], body) - self.assertEqual(self.app.backend.apply_chord.call_count, 2) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py deleted file mode 100644 index ecad3f8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -from celery.app.task import Context -from celery.tests.case import AppCase - - -# Retrieve the values of all context attributes as a -# dictionary in an implementation-agnostic manner. -def get_context_as_dict(ctx, getter=getattr): - defaults = {} - for attr_name in dir(ctx): - if attr_name.startswith('_'): - continue # Ignore pseudo-private attributes - attr = getter(ctx, attr_name) - if callable(attr): - continue # Ignore methods and other non-trivial types - defaults[attr_name] = attr - return defaults -default_context = get_context_as_dict(Context()) - - -class test_Context(AppCase): - - def test_default_context(self): - # A bit of a tautological test, since it uses the same - # initializer as the default_context constructor. 
- defaults = dict(default_context, children=[]) - self.assertDictEqual(get_context_as_dict(Context()), defaults) - - def test_updated_context(self): - expected = dict(default_context) - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - expected.update(changes) - ctx.update(changes) - self.assertDictEqual(get_context_as_dict(ctx), expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) - - def test_modified_context(self): - expected = dict(default_context) - ctx = Context() - expected['id'] = 'unique id' - expected['args'] = ['some', 1] - ctx.id = 'unique id' - ctx.args = ['some', 1] - self.assertDictEqual(get_context_as_dict(ctx), expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) - - def test_cleared_context(self): - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - ctx.update(changes) - ctx.clear() - defaults = dict(default_context, children=[]) - self.assertDictEqual(get_context_as_dict(ctx), defaults) - self.assertDictEqual(get_context_as_dict(Context()), defaults) - - def test_context_get(self): - expected = dict(default_context) - changes = dict(id='unique id', args=['some', 1], wibble='wobble') - ctx = Context() - expected.update(changes) - ctx.update(changes) - ctx_dict = get_context_as_dict(ctx, getter=Context.get) - self.assertDictEqual(ctx_dict, expected) - self.assertDictEqual(get_context_as_dict(Context()), default_context) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py deleted file mode 100644 index 50a9e23..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py +++ /dev/null @@ -1,731 +0,0 @@ -from __future__ import absolute_import - -from contextlib import contextmanager - -from celery import states -from celery.exceptions import IncompleteStream, TimeoutError -from celery.five import range -from celery.result import ( - AsyncResult, - EagerResult, - TaskSetResult, - result_from_tuple, -) -from celery.utils import uuid -from celery.utils.serialization import pickle - -from celery.tests.case import AppCase, Mock, depends_on_current_app, patch - - -def mock_task(name, state, result): - return dict(id=uuid(), name=name, state=state, result=result) - - -def save_result(app, task): - traceback = 'Some traceback' - if task['state'] == states.SUCCESS: - app.backend.mark_as_done(task['id'], task['result']) - elif task['state'] == states.RETRY: - app.backend.mark_as_retry( - task['id'], task['result'], traceback=traceback, - ) - else: - app.backend.mark_as_failure( - task['id'], task['result'], traceback=traceback, - ) - - -def make_mock_group(app, size=10): - tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)] - [save_result(app, task) for task in tasks] - return [app.AsyncResult(task['id']) for task in tasks] - - -class test_AsyncResult(AppCase): - - def setup(self): - self.task1 = mock_task('task1', states.SUCCESS, 'the') - self.task2 = mock_task('task2', states.SUCCESS, 'quick') - self.task3 = mock_task('task3', states.FAILURE, KeyError('brown')) - self.task4 = mock_task('task3', states.RETRY, KeyError('red')) - - for task in (self.task1, self.task2, self.task3, self.task4): - save_result(self.app, task) - - @self.app.task(shared=False) - def mytask(): - pass - self.mytask = mytask - - def test_compat_properties(self): - x = self.app.AsyncResult('1') - self.assertEqual(x.task_id, x.id) - 
x.task_id = '2' - self.assertEqual(x.id, '2') - - def test_children(self): - x = self.app.AsyncResult('1') - children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x._cache = {'children': children, 'status': states.SUCCESS} - x.backend = Mock() - self.assertTrue(x.children) - self.assertEqual(len(x.children), 3) - - def test_propagates_for_parent(self): - x = self.app.AsyncResult(uuid()) - x.backend = Mock(name='backend') - x.backend.get_task_meta.return_value = {} - x.backend.wait_for.return_value = { - 'status': states.SUCCESS, 'result': 84, - } - x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) - with self.assertRaises(KeyError): - x.get(propagate=True) - self.assertFalse(x.backend.wait_for.called) - - x.parent = EagerResult(uuid(), 42, states.SUCCESS) - self.assertEqual(x.get(propagate=True), 84) - self.assertTrue(x.backend.wait_for.called) - - def test_get_children(self): - tid = uuid() - x = self.app.AsyncResult(tid) - child = [self.app.AsyncResult(uuid()).as_tuple() - for i in range(10)] - x._cache = {'children': child} - self.assertTrue(x.children) - self.assertEqual(len(x.children), 10) - - x._cache = {'status': states.SUCCESS} - x.backend._cache[tid] = {'result': None} - self.assertIsNone(x.children) - - def test_build_graph_get_leaf_collect(self): - x = self.app.AsyncResult('1') - x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} - c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x.iterdeps = Mock() - x.iterdeps.return_value = ( - (None, x), - (x, c[0]), - (c[0], c[1]), - (c[1], c[2]) - ) - x.backend.READY_STATES = states.READY_STATES - self.assertTrue(x.graph) - - self.assertIs(x.get_leaf(), 2) - - it = x.collect() - self.assertListEqual(list(it), [ - (x, None), - (c[0], 0), - (c[1], 1), - (c[2], 2), - ]) - - def test_iterdeps(self): - x = self.app.AsyncResult('1') - c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] - x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} - for child in c: - child.backend = Mock() - child.backend.get_children.return_value = [] - it = x.iterdeps() - self.assertListEqual(list(it), [ - (None, x), - (x, c[0]), - (x, c[1]), - (x, c[2]), - ]) - x._cache = None - x.ready = Mock() - x.ready.return_value = False - with self.assertRaises(IncompleteStream): - list(x.iterdeps()) - list(x.iterdeps(intermediate=True)) - - def test_eq_not_implemented(self): - self.assertFalse(self.app.AsyncResult('1') == object()) - - @depends_on_current_app - def test_reduce(self): - a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name) - restored = pickle.loads(pickle.dumps(a1)) - self.assertEqual(restored.id, 'uuid') - self.assertEqual(restored.task_name, self.mytask.name) - - a2 = self.app.AsyncResult('uuid') - self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid') - - def test_successful(self): - ok_res = self.app.AsyncResult(self.task1['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok_res2 = self.app.AsyncResult(self.task4['id']) - - self.assertTrue(ok_res.successful()) - self.assertFalse(nok_res.successful()) - self.assertFalse(nok_res2.successful()) - - pending_res = self.app.AsyncResult(uuid()) - self.assertFalse(pending_res.successful()) - - def test_str(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - self.assertEqual(str(ok_res), self.task1['id']) - self.assertEqual(str(ok2_res), self.task2['id']) - self.assertEqual(str(nok_res), 
self.task3['id']) - - pending_id = uuid() - pending_res = self.app.AsyncResult(pending_id) - self.assertEqual(str(pending_res), pending_id) - - def test_repr(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % ( - self.task1['id'])) - self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % ( - self.task2['id'])) - self.assertEqual(repr(nok_res), '<AsyncResult: %s>' % ( - self.task3['id'])) - - pending_id = uuid() - pending_res = self.app.AsyncResult(pending_id) - self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % ( - pending_id)) - - def test_hash(self): - self.assertEqual(hash(self.app.AsyncResult('x0w991')), - hash(self.app.AsyncResult('x0w991'))) - self.assertNotEqual(hash(self.app.AsyncResult('x0w991')), - hash(self.app.AsyncResult('x1w991'))) - - def test_get_traceback(self): - ok_res = self.app.AsyncResult(self.task1['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok_res2 = self.app.AsyncResult(self.task4['id']) - self.assertFalse(ok_res.traceback) - self.assertTrue(nok_res.traceback) - self.assertTrue(nok_res2.traceback) - - pending_res = self.app.AsyncResult(uuid()) - self.assertFalse(pending_res.traceback) - - def test_get(self): - ok_res = self.app.AsyncResult(self.task1['id']) - ok2_res = self.app.AsyncResult(self.task2['id']) - nok_res = self.app.AsyncResult(self.task3['id']) - nok2_res = self.app.AsyncResult(self.task4['id']) - - self.assertEqual(ok_res.get(), 'the') - self.assertEqual(ok2_res.get(), 'quick') - with self.assertRaises(KeyError): - nok_res.get() - self.assertTrue(nok_res.get(propagate=False)) - self.assertIsInstance(nok2_res.result, KeyError) - self.assertEqual(ok_res.info, 'the') - - def test_get_timeout(self): - res = self.app.AsyncResult(self.task4['id']) # has RETRY state - with self.assertRaises(TimeoutError): - res.get(timeout=0.001) - - pending_res = self.app.AsyncResult(uuid()) - with patch('celery.result.time') as _time: - with self.assertRaises(TimeoutError): - pending_res.get(timeout=0.001, interval=0.001) - _time.sleep.assert_called_with(0.001) - - def test_get_timeout_longer(self): - res = self.app.AsyncResult(self.task4['id']) # has RETRY state - with patch('celery.result.time') as _time: - with self.assertRaises(TimeoutError): - res.get(timeout=1, interval=1) - _time.sleep.assert_called_with(1) - - def test_ready(self): - oks = (self.app.AsyncResult(self.task1['id']), - self.app.AsyncResult(self.task2['id']), - self.app.AsyncResult(self.task3['id'])) - self.assertTrue(all(result.ready() for result in oks)) - self.assertFalse(self.app.AsyncResult(self.task4['id']).ready()) - - self.assertFalse(self.app.AsyncResult(uuid()).ready()) - - -class test_ResultSet(AppCase): - - def test_resultset_repr(self): - self.assertTrue(repr(self.app.ResultSet( - [self.app.AsyncResult(t) for t in ['1', '2', '3']]))) - - def test_eq_other(self): - self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1) - self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1])) - - def test_get(self): - x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) - b = x.results[0].backend = Mock() - b.supports_native_join = False - x.join_native = Mock() - x.join = Mock() - x.get() - self.assertTrue(x.join.called) - b.supports_native_join = True - x.get() - self.assertTrue(x.join_native.called) - - def test_get_empty(self): - x = self.app.ResultSet([]) - self.assertIsNone(x.supports_native_join) - x.join = Mock(name='join') - x.get() - 
self.assertTrue(x.join.called) - - def test_add(self): - x = self.app.ResultSet([1]) - x.add(2) - self.assertEqual(len(x), 2) - x.add(2) - self.assertEqual(len(x), 2) - - @contextmanager - def dummy_copy(self): - with patch('celery.result.copy') as copy: - - def passt(arg): - return arg - copy.side_effect = passt - - yield - - def test_iterate_respects_subpolling_interval(self): - r1 = self.app.AsyncResult(uuid()) - r2 = self.app.AsyncResult(uuid()) - backend = r1.backend = r2.backend = Mock() - backend.subpolling_interval = 10 - - ready = r1.ready = r2.ready = Mock() - - def se(*args, **kwargs): - ready.side_effect = KeyError() - return False - ready.return_value = False - ready.side_effect = se - - x = self.app.ResultSet([r1, r2]) - with self.dummy_copy(): - with patch('celery.result.time') as _time: - with self.assertPendingDeprecation(): - with self.assertRaises(KeyError): - list(x.iterate()) - _time.sleep.assert_called_with(10) - - backend.subpolling_interval = 0 - with patch('celery.result.time') as _time: - with self.assertPendingDeprecation(): - with self.assertRaises(KeyError): - ready.return_value = False - ready.side_effect = se - list(x.iterate()) - self.assertFalse(_time.sleep.called) - - def test_times_out(self): - r1 = self.app.AsyncResult(uuid) - r1.ready = Mock() - r1.ready.return_value = False - x = self.app.ResultSet([r1]) - with self.dummy_copy(): - with patch('celery.result.time'): - with self.assertPendingDeprecation(): - with self.assertRaises(TimeoutError): - list(x.iterate(timeout=1)) - - def test_add_discard(self): - x = self.app.ResultSet([]) - x.add(self.app.AsyncResult('1')) - self.assertIn(self.app.AsyncResult('1'), x.results) - x.discard(self.app.AsyncResult('1')) - x.discard(self.app.AsyncResult('1')) - x.discard('1') - self.assertNotIn(self.app.AsyncResult('1'), x.results) - - x.update([self.app.AsyncResult('2')]) - - def test_clear(self): - x = self.app.ResultSet([]) - r = x.results - x.clear() - self.assertIs(x.results, r) - - -class MockAsyncResultFailure(AsyncResult): - - @property - def result(self): - return KeyError('baz') - - @property - def state(self): - return states.FAILURE - - def get(self, propagate=True, **kwargs): - if propagate: - raise self.result - return self.result - - -class MockAsyncResultSuccess(AsyncResult): - forgotten = False - - def forget(self): - self.forgotten = True - - @property - def result(self): - return 42 - - @property - def state(self): - return states.SUCCESS - - def get(self, **kwargs): - return self.result - - -class SimpleBackend(object): - ids = [] - - def __init__(self, ids=[]): - self.ids = ids - - def get_many(self, *args, **kwargs): - return ((id, {'result': i, 'status': states.SUCCESS}) - for i, id in enumerate(self.ids)) - - -class test_TaskSetResult(AppCase): - - def setup(self): - self.size = 10 - self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size)) - - def test_total(self): - self.assertEqual(self.ts.total, self.size) - - def test_compat_properties(self): - self.assertEqual(self.ts.taskset_id, self.ts.id) - self.ts.taskset_id = 'foo' - self.assertEqual(self.ts.taskset_id, 'foo') - - def test_compat_subtasks_kwarg(self): - x = TaskSetResult(uuid(), subtasks=[1, 2, 3]) - self.assertEqual(x.results, [1, 2, 3]) - - def test_itersubtasks(self): - it = self.ts.itersubtasks() - - for i, t in enumerate(it): - self.assertEqual(t.get(), i) - - -class test_GroupResult(AppCase): - - def setup(self): - self.size = 10 - self.ts = self.app.GroupResult( - uuid(), make_mock_group(self.app, self.size), - ) 
- - @depends_on_current_app - def test_is_pickleable(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertEqual(pickle.loads(pickle.dumps(ts)), ts) - ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2) - - def test_len(self): - self.assertEqual(len(self.ts), self.size) - - def test_eq_other(self): - self.assertFalse(self.ts == 1) - - @depends_on_current_app - def test_reduce(self): - self.assertTrue(pickle.loads(pickle.dumps(self.ts))) - - def test_iterate_raises(self): - ar = MockAsyncResultFailure(uuid(), app=self.app) - ts = self.app.GroupResult(uuid(), [ar]) - with self.assertPendingDeprecation(): - it = ts.iterate() - with self.assertRaises(KeyError): - next(it) - - def test_forget(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - ts.forget() - for sub in subs: - self.assertTrue(sub.forgotten) - - def test_getitem(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - self.assertIs(ts[0], subs[0]) - - def test_save_restore(self): - subs = [MockAsyncResultSuccess(uuid(), app=self.app), - MockAsyncResultSuccess(uuid(), app=self.app)] - ts = self.app.GroupResult(uuid(), subs) - ts.save() - with self.assertRaises(AttributeError): - ts.save(backend=object()) - self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks, - ts.subtasks) - ts.delete() - self.assertIsNone(self.app.GroupResult.restore(ts.id)) - with self.assertRaises(AttributeError): - self.app.GroupResult.restore(ts.id, backend=object()) - - def test_join_native(self): - backend = SimpleBackend() - subtasks = [self.app.AsyncResult(uuid(), backend=backend) - for i in range(10)] - ts = self.app.GroupResult(uuid(), subtasks) - ts.app.backend = backend - backend.ids = [subtask.id for subtask in subtasks] - res = ts.join_native() - self.assertEqual(res, list(range(10))) - - def test_join_native_raises(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - ts.iter_native = Mock() - ts.iter_native.return_value = iter([ - (uuid(), {'status': states.FAILURE, 'result': KeyError()}) - ]) - with self.assertRaises(KeyError): - ts.join_native(propagate=True) - - def test_failed_join_report(self): - res = Mock() - ts = self.app.GroupResult(uuid(), [res]) - res.state = states.FAILURE - res.backend.is_cached.return_value = True - self.assertIs(next(ts._failed_join_report()), res) - res.backend.is_cached.return_value = False - with self.assertRaises(StopIteration): - next(ts._failed_join_report()) - - def test_repr(self): - self.assertTrue(repr( - self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - )) - - def test_children_is_results(self): - ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) - self.assertIs(ts.children, ts.results) - - def test_iter_native(self): - backend = SimpleBackend() - subtasks = [self.app.AsyncResult(uuid(), backend=backend) - for i in range(10)] - ts = self.app.GroupResult(uuid(), subtasks) - ts.app.backend = backend - backend.ids = [subtask.id for subtask in subtasks] - self.assertEqual(len(list(ts.iter_native())), 10) - - def test_iterate_yields(self): - ar = MockAsyncResultSuccess(uuid(), app=self.app) - ar2 = MockAsyncResultSuccess(uuid(), app=self.app) - ts = self.app.GroupResult(uuid(), [ar, ar2]) - with self.assertPendingDeprecation(): - it = ts.iterate() 
- self.assertEqual(next(it), 42) - self.assertEqual(next(it), 42) - - def test_iterate_eager(self): - ar1 = EagerResult(uuid(), 42, states.SUCCESS) - ar2 = EagerResult(uuid(), 42, states.SUCCESS) - ts = self.app.GroupResult(uuid(), [ar1, ar2]) - with self.assertPendingDeprecation(): - it = ts.iterate() - self.assertEqual(next(it), 42) - self.assertEqual(next(it), 42) - - def test_join_timeout(self): - ar = MockAsyncResultSuccess(uuid(), app=self.app) - ar2 = MockAsyncResultSuccess(uuid(), app=self.app) - ar3 = self.app.AsyncResult(uuid()) - ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) - with self.assertRaises(TimeoutError): - ts.join(timeout=0.0000001) - - ar4 = self.app.AsyncResult(uuid()) - ar4.get = Mock() - ts2 = self.app.GroupResult(uuid(), [ar4]) - self.assertTrue(ts2.join(timeout=0.1)) - - def test_iter_native_when_empty_group(self): - ts = self.app.GroupResult(uuid(), []) - self.assertListEqual(list(ts.iter_native()), []) - - def test_iterate_simple(self): - with self.assertPendingDeprecation(): - it = self.ts.iterate() - results = sorted(list(it)) - self.assertListEqual(results, list(range(self.size))) - - def test___iter__(self): - self.assertListEqual(list(iter(self.ts)), self.ts.results) - - def test_join(self): - joined = self.ts.join() - self.assertListEqual(joined, list(range(self.size))) - - def test_successful(self): - self.assertTrue(self.ts.successful()) - - def test_failed(self): - self.assertFalse(self.ts.failed()) - - def test_waiting(self): - self.assertFalse(self.ts.waiting()) - - def test_ready(self): - self.assertTrue(self.ts.ready()) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), len(self.ts)) - - -class test_pending_AsyncResult(AppCase): - - def setup(self): - self.task = self.app.AsyncResult(uuid()) - - def test_result(self): - self.assertIsNone(self.task.result) - - -class test_failed_AsyncResult(test_GroupResult): - - def setup(self): - self.size = 11 - subtasks = make_mock_group(self.app, 10) - failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) - save_result(self.app, failed) - failed_res = self.app.AsyncResult(failed['id']) - self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res]) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), len(self.ts) - 1) - - def test_iterate_simple(self): - with self.assertPendingDeprecation(): - it = self.ts.iterate() - - def consume(): - return list(it) - - with self.assertRaises(KeyError): - consume() - - def test_join(self): - with self.assertRaises(KeyError): - self.ts.join() - - def test_successful(self): - self.assertFalse(self.ts.successful()) - - def test_failed(self): - self.assertTrue(self.ts.failed()) - - -class test_pending_Group(AppCase): - - def setup(self): - self.ts = self.app.GroupResult( - uuid(), [self.app.AsyncResult(uuid()), - self.app.AsyncResult(uuid())]) - - def test_completed_count(self): - self.assertEqual(self.ts.completed_count(), 0) - - def test_ready(self): - self.assertFalse(self.ts.ready()) - - def test_waiting(self): - self.assertTrue(self.ts.waiting()) - - def x_join(self): - with self.assertRaises(TimeoutError): - self.ts.join(timeout=0.001) - - def x_join_longer(self): - with self.assertRaises(TimeoutError): - self.ts.join(timeout=1) - - -class test_EagerResult(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def raising(x, y): - raise KeyError(x, y) - self.raising = raising - - def test_wait_raises(self): - res = self.raising.apply(args=[3, 3]) - with self.assertRaises(KeyError): - 
res.wait() - self.assertTrue(res.wait(propagate=False)) - - def test_wait(self): - res = EagerResult('x', 'x', states.RETRY) - res.wait() - self.assertEqual(res.state, states.RETRY) - self.assertEqual(res.status, states.RETRY) - - def test_forget(self): - res = EagerResult('x', 'x', states.RETRY) - res.forget() - - def test_revoke(self): - res = self.raising.apply(args=[3, 3]) - self.assertFalse(res.revoke()) - - -class test_tuples(AppCase): - - def test_AsyncResult(self): - x = self.app.AsyncResult(uuid()) - self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) - self.assertEqual(x, result_from_tuple(x, self.app)) - - def test_with_parent(self): - x = self.app.AsyncResult(uuid()) - x.parent = self.app.AsyncResult(uuid()) - y = result_from_tuple(x.as_tuple(), self.app) - self.assertEqual(y, x) - self.assertEqual(y.parent, x.parent) - self.assertIsInstance(y.parent, AsyncResult) - - def test_compat(self): - uid = uuid() - x = result_from_tuple([uid, []], app=self.app) - self.assertEqual(x.id, uid) - - def test_GroupResult(self): - x = self.app.GroupResult( - uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], - ) - self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) - self.assertEqual(x, result_from_tuple(x, self.app)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py deleted file mode 100644 index b30a4ee..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import absolute_import - -from celery.states import state -from celery import states -from celery.tests.case import Case - - -class test_state_precedence(Case): - - def test_gt(self): - self.assertGreater(state(states.SUCCESS), - state(states.PENDING)) - self.assertGreater(state(states.FAILURE), - state(states.RECEIVED)) - self.assertGreater(state(states.REVOKED), - state(states.STARTED)) - self.assertGreater(state(states.SUCCESS), - state('CRASHED')) - self.assertGreater(state(states.FAILURE), - state('CRASHED')) - self.assertFalse(state(states.REVOKED) > state('CRASHED')) - - def test_lt(self): - self.assertLess(state(states.PENDING), state(states.SUCCESS)) - self.assertLess(state(states.RECEIVED), state(states.FAILURE)) - self.assertLess(state(states.STARTED), state(states.REVOKED)) - self.assertLess(state('CRASHED'), state(states.SUCCESS)) - self.assertLess(state('CRASHED'), state(states.FAILURE)) - self.assertTrue(state(states.REVOKED) < state('CRASHED')) - self.assertTrue(state(states.REVOKED) <= state('CRASHED')) - self.assertTrue(state('CRASHED') >= state(states.REVOKED)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py deleted file mode 100644 index 4feae0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py +++ /dev/null @@ -1,464 +0,0 @@ -from __future__ import absolute_import - -from datetime import datetime, timedelta - -from kombu import Queue - -from celery import Task - -from celery.exceptions import Retry -from celery.five import items, range, string_t -from celery.result import EagerResult -from celery.utils import uuid -from celery.utils.timeutils import parse_iso8601 - -from celery.tests.case import AppCase, depends_on_current_app, patch - - -def return_True(*args, **kwargs): - # Task run functions can't be closures/lambdas, as they're pickled. 
- return True - - -def raise_exception(self, **kwargs): - raise Exception('%s error' % self.__class__) - - -class MockApplyTask(Task): - abstract = True - applied = 0 - - def run(self, x, y): - return x * y - - def apply_async(self, *args, **kwargs): - self.applied += 1 - - -class TasksCase(AppCase): - - def setup(self): - self.mytask = self.app.task(shared=False)(return_True) - - @self.app.task(bind=True, count=0, shared=False) - def increment_counter(self, increment_by=1): - self.count += increment_by or 1 - return self.count - self.increment_counter = increment_counter - - @self.app.task(shared=False) - def raising(): - raise KeyError('foo') - self.raising = raising - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): - self.iterations += 1 - rmax = self.max_retries if max_retries is None else max_retries - - assert repr(self.request) - retries = self.request.retries - if care and retries >= rmax: - return arg1 - else: - raise self.retry(countdown=0, max_retries=rmax) - self.retry_task = retry_task - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task_noargs(self, **kwargs): - self.iterations += 1 - - if self.request.retries >= 3: - return 42 - else: - raise self.retry(countdown=0) - self.retry_task_noargs = retry_task_noargs - - @self.app.task(bind=True, max_retries=3, iterations=0, - base=MockApplyTask, shared=False) - def retry_task_mockapply(self, arg1, arg2, kwarg=1): - self.iterations += 1 - - retries = self.request.retries - if retries >= 3: - return arg1 - raise self.retry(countdown=0) - self.retry_task_mockapply = retry_task_mockapply - - @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) - def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): - self.iterations += 1 - - retries = self.request.retries - if retries >= 3: - return arg1 + kwarg - else: - try: - raise MyCustomException('Elaine Marie Benes') - except MyCustomException as exc: - kwargs.update(kwarg=kwarg) - raise self.retry(countdown=0, exc=exc) - self.retry_task_customexc = retry_task_customexc - - -class MyCustomException(Exception): - """Random custom exception.""" - - -class test_task_retries(TasksCase): - - def test_retry(self): - self.retry_task.max_retries = 3 - self.retry_task.iterations = 0 - self.retry_task.apply([0xFF, 0xFFFF]) - self.assertEqual(self.retry_task.iterations, 4) - - self.retry_task.max_retries = 3 - self.retry_task.iterations = 0 - self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) - self.assertEqual(self.retry_task.iterations, 11) - - def test_retry_no_args(self): - self.retry_task_noargs.max_retries = 3 - self.retry_task_noargs.iterations = 0 - self.retry_task_noargs.apply(propagate=True).get() - self.assertEqual(self.retry_task_noargs.iterations, 4) - - def test_retry_kwargs_can_be_empty(self): - self.retry_task_mockapply.push_request() - try: - with self.assertRaises(Retry): - import sys - try: - sys.exc_clear() - except AttributeError: - pass - self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) - finally: - self.retry_task_mockapply.pop_request() - - def test_retry_not_eager(self): - self.retry_task_mockapply.push_request() - try: - self.retry_task_mockapply.request.called_directly = False - exc = Exception('baz') - try: - self.retry_task_mockapply.retry( - args=[4, 4], kwargs={'task_retries': 0}, - exc=exc, throw=False, - ) - self.assertTrue(self.retry_task_mockapply.applied) - finally: - 
self.retry_task_mockapply.applied = 0 - - try: - with self.assertRaises(Retry): - self.retry_task_mockapply.retry( - args=[4, 4], kwargs={'task_retries': 0}, - exc=exc, throw=True) - self.assertTrue(self.retry_task_mockapply.applied) - finally: - self.retry_task_mockapply.applied = 0 - finally: - self.retry_task_mockapply.pop_request() - - def test_retry_with_kwargs(self): - self.retry_task_customexc.max_retries = 3 - self.retry_task_customexc.iterations = 0 - self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) - self.assertEqual(self.retry_task_customexc.iterations, 4) - - def test_retry_with_custom_exception(self): - self.retry_task_customexc.max_retries = 2 - self.retry_task_customexc.iterations = 0 - result = self.retry_task_customexc.apply( - [0xFF, 0xFFFF], {'kwarg': 0xF}, - ) - with self.assertRaises(MyCustomException): - result.get() - self.assertEqual(self.retry_task_customexc.iterations, 3) - - def test_max_retries_exceeded(self): - self.retry_task.max_retries = 2 - self.retry_task.iterations = 0 - result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(self.retry_task.MaxRetriesExceededError): - result.get() - self.assertEqual(self.retry_task.iterations, 3) - - self.retry_task.max_retries = 1 - self.retry_task.iterations = 0 - result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(self.retry_task.MaxRetriesExceededError): - result.get() - self.assertEqual(self.retry_task.iterations, 2) - - -class test_canvas_utils(TasksCase): - - def test_si(self): - self.assertTrue(self.retry_task.si()) - self.assertTrue(self.retry_task.si().immutable) - - def test_chunks(self): - self.assertTrue(self.retry_task.chunks(range(100), 10)) - - def test_map(self): - self.assertTrue(self.retry_task.map(range(100))) - - def test_starmap(self): - self.assertTrue(self.retry_task.starmap(range(100))) - - def test_on_success(self): - self.retry_task.on_success(1, 1, (), {}) - - -class test_tasks(TasksCase): - - def now(self): - return self.app.now() - - @depends_on_current_app - def test_unpickle_task(self): - import pickle - - @self.app.task(shared=True) - def xxx(): - pass - self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx.app.tasks[xxx.name]) - - def test_AsyncResult(self): - task_id = uuid() - result = self.retry_task.AsyncResult(task_id) - self.assertEqual(result.backend, self.retry_task.backend) - self.assertEqual(result.id, task_id) - - def assertNextTaskDataEqual(self, consumer, presult, task_name, - test_eta=False, test_expires=False, **kwargs): - next_task = consumer.queues[0].get(accept=['pickle']) - task_data = next_task.decode() - self.assertEqual(task_data['id'], presult.id) - self.assertEqual(task_data['task'], task_name) - task_kwargs = task_data.get('kwargs', {}) - if test_eta: - self.assertIsInstance(task_data.get('eta'), string_t) - to_datetime = parse_iso8601(task_data.get('eta')) - self.assertIsInstance(to_datetime, datetime) - if test_expires: - self.assertIsInstance(task_data.get('expires'), string_t) - to_datetime = parse_iso8601(task_data.get('expires')) - self.assertIsInstance(to_datetime, datetime) - for arg_name, arg_value in items(kwargs): - self.assertEqual(task_kwargs.get(arg_name), arg_value) - - def test_incomplete_task_cls(self): - - class IncompleteTask(Task): - app = self.app - name = 'c.unittest.t.itask' - - with self.assertRaises(NotImplementedError): - IncompleteTask().run() - - def test_task_kwargs_must_be_dictionary(self): - with self.assertRaises(ValueError): - 
self.increment_counter.apply_async([], 'str') - - def test_task_args_must_be_list(self): - with self.assertRaises(ValueError): - self.increment_counter.apply_async('str', {}) - - def test_regular_task(self): - self.assertIsInstance(self.mytask, Task) - self.assertTrue(self.mytask.run()) - self.assertTrue( - callable(self.mytask), 'Task class is callable()', - ) - self.assertTrue(self.mytask(), 'Task class runs run() when called') - - with self.app.connection_or_acquire() as conn: - consumer = self.app.amqp.TaskConsumer(conn) - with self.assertRaises(NotImplementedError): - consumer.receive('foo', 'foo') - consumer.purge() - self.assertIsNone(consumer.queues[0].get()) - self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) - - # Without arguments. - presult = self.mytask.delay() - self.assertNextTaskDataEqual(consumer, presult, self.mytask.name) - - # With arguments. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, name='George Costanza', - ) - - # send_task - sresult = self.app.send_task(self.mytask.name, - kwargs=dict(name='Elaine M. Benes')) - self.assertNextTaskDataEqual( - consumer, sresult, self.mytask.name, name='Elaine M. Benes', - ) - - # With eta. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), - eta=self.now() + timedelta(days=1), - expires=self.now() + timedelta(days=2), - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, - name='George Costanza', test_eta=True, test_expires=True, - ) - - # With countdown. - presult2 = self.mytask.apply_async( - kwargs=dict(name='George Costanza'), countdown=10, expires=12, - ) - self.assertNextTaskDataEqual( - consumer, presult2, self.mytask.name, - name='George Costanza', test_eta=True, test_expires=True, - ) - - # Discarding all tasks. 
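# A minimal standalone sketch (not part of the patch) of the producer-side
# call shapes test_regular_task drives above, assuming Celery 3.x with
# CELERY_ALWAYS_EAGER enabled so no broker is needed; the app and task
# names are illustrative:
from celery import Celery

app = Celery('sketch')
app.conf.CELERY_ALWAYS_EAGER = True

@app.task
def hello(name='world'):
    return 'Hello, %s!' % name

hello.delay()                                  # no arguments
hello.apply_async(kwargs={'name': 'George'})   # kwargs must be a dict
hello.apply_async(kwargs={'name': 'George'},
                  countdown=10,                # schedule ~10s from now
                  expires=12)                  # drop if not started in time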
- consumer.purge() - self.mytask.apply_async() - self.assertEqual(consumer.purge(), 1) - self.assertIsNone(consumer.queues[0].get()) - - self.assertFalse(presult.successful()) - self.mytask.backend.mark_as_done(presult.id, result=None) - self.assertTrue(presult.successful()) - - def test_repr_v2_compat(self): - self.mytask.__v2_compat__ = True - self.assertIn('v2 compatible', repr(self.mytask)) - - def test_apply_with_self(self): - - @self.app.task(__self__=42, shared=False) - def tawself(self): - return self - - self.assertEqual(tawself.apply().get(), 42) - - self.assertEqual(tawself(), 42) - - def test_context_get(self): - self.mytask.push_request() - try: - request = self.mytask.request - request.foo = 32 - self.assertEqual(request.get('foo'), 32) - self.assertEqual(request.get('bar', 36), 36) - request.clear() - finally: - self.mytask.pop_request() - - def test_task_class_repr(self): - self.assertIn('class Task of', repr(self.mytask.app.Task)) - self.mytask.app.Task._app = None - self.assertIn('unbound', repr(self.mytask.app.Task, )) - - def test_bind_no_magic_kwargs(self): - self.mytask.accept_magic_kwargs = None - self.mytask.bind(self.mytask.app) - - def test_annotate(self): - with patch('celery.app.task.resolve_all_annotations') as anno: - anno.return_value = [{'FOO': 'BAR'}] - - @self.app.task(shared=False) - def task(): - pass - task.annotate() - self.assertEqual(task.FOO, 'BAR') - - def test_after_return(self): - self.mytask.push_request() - try: - self.mytask.request.chord = self.mytask.s() - self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) - self.mytask.request.clear() - finally: - self.mytask.pop_request() - - def test_send_task_sent_event(self): - with self.app.connection() as conn: - self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True - self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event) - - def test_update_state(self): - - @self.app.task(shared=False) - def yyy(): - pass - - yyy.push_request() - try: - tid = uuid() - yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'}) - self.assertEqual(yyy.AsyncResult(tid).status, 'FROBULATING') - self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) - - yyy.request.id = tid - yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'}) - self.assertEqual(yyy.AsyncResult(tid).status, 'FROBUZATING') - self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) - finally: - yyy.pop_request() - - def test_repr(self): - - @self.app.task(shared=False) - def task_test_repr(): - pass - - self.assertIn('task_test_repr', repr(task_test_repr)) - - def test_has___name__(self): - - @self.app.task(shared=False) - def yyy2(): - pass - - self.assertTrue(yyy2.__name__) - - -class test_apply_task(TasksCase): - - def test_apply_throw(self): - with self.assertRaises(KeyError): - self.raising.apply(throw=True) - - def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self): - self.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True - with self.assertRaises(KeyError): - self.raising.apply() - - def test_apply(self): - self.increment_counter.count = 0 - - e = self.increment_counter.apply() - self.assertIsInstance(e, EagerResult) - self.assertEqual(e.get(), 1) - - e = self.increment_counter.apply(args=[1]) - self.assertEqual(e.get(), 2) - - e = self.increment_counter.apply(kwargs={'increment_by': 4}) - self.assertEqual(e.get(), 6) - - self.assertTrue(e.successful()) - self.assertTrue(e.ready()) - self.assertTrue(repr(e).startswith('<EagerResult:')) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py deleted file mode 100644 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py +++ /dev/null - self.assertEqual(Proxy(lambda: 10) >> 2, Proxy(lambda: 2)) - self.assertEqual(Proxy(lambda: 10) ^ 7, Proxy(lambda: 
13)) - self.assertEqual(Proxy(lambda: 10) | 40, Proxy(lambda: 42)) - self.assertEqual(~Proxy(lambda: 10), Proxy(lambda: -11)) - self.assertEqual(-Proxy(lambda: 10), Proxy(lambda: -10)) - self.assertEqual(+Proxy(lambda: -10), Proxy(lambda: -10)) - self.assertTrue(Proxy(lambda: 10) < Proxy(lambda: 20)) - self.assertTrue(Proxy(lambda: 20) > Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) >= Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) <= Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 10) == Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 20) != Proxy(lambda: 10)) - self.assertTrue(Proxy(lambda: 100).__divmod__(30)) - self.assertTrue(Proxy(lambda: 100).__truediv__(30)) - self.assertTrue(abs(Proxy(lambda: -100))) - - x = Proxy(lambda: 10) - x -= 1 - self.assertEqual(x, 9) - x = Proxy(lambda: 9) - x += 1 - self.assertEqual(x, 10) - x = Proxy(lambda: 10) - x *= 2 - self.assertEqual(x, 20) - x = Proxy(lambda: 20) - x /= 2 - self.assertEqual(x, 10) - x = Proxy(lambda: 10) - x %= 2 - self.assertEqual(x, 0) - x = Proxy(lambda: 10) - x <<= 3 - self.assertEqual(x, 80) - x = Proxy(lambda: 80) - x >>= 4 - self.assertEqual(x, 5) - x = Proxy(lambda: 5) - x ^= 1 - self.assertEqual(x, 4) - x = Proxy(lambda: 4) - x **= 4 - self.assertEqual(x, 256) - x = Proxy(lambda: 256) - x //= 2 - self.assertEqual(x, 128) - x = Proxy(lambda: 128) - x |= 2 - self.assertEqual(x, 130) - x = Proxy(lambda: 130) - x &= 10 - self.assertEqual(x, 2) - - x = Proxy(lambda: 10) - self.assertEqual(type(x.__float__()), float) - self.assertEqual(type(x.__int__()), int) - if not PY3: - self.assertEqual(type(x.__long__()), long_t) - self.assertTrue(hex(x)) - self.assertTrue(oct(x)) - - def test_hash(self): - - class X(object): - - def __hash__(self): - return 1234 - - self.assertEqual(hash(Proxy(lambda: X())), 1234) - - def test_call(self): - - class X(object): - - def __call__(self): - return 1234 - - self.assertEqual(Proxy(lambda: X())(), 1234) - - def test_context(self): - - class X(object): - entered = exited = False - - def __enter__(self): - self.entered = True - return 1234 - - def __exit__(self, *exc_info): - self.exited = True - - v = X() - x = Proxy(lambda: v) - with x as val: - self.assertEqual(val, 1234) - self.assertTrue(x.entered) - self.assertTrue(x.exited) - - def test_reduce(self): - - class X(object): - - def __reduce__(self): - return 123 - - x = Proxy(lambda: X()) - self.assertEqual(x.__reduce__(), 123) - - -class test_PromiseProxy(Case): - - def test_only_evaluated_once(self): - - class X(object): - attr = 123 - evals = 0 - - def __init__(self): - self.__class__.evals += 1 - - p = PromiseProxy(X) - self.assertEqual(p.attr, 123) - self.assertEqual(p.attr, 123) - self.assertEqual(X.evals, 1) - - def test_callbacks(self): - source = Mock(name='source') - p = PromiseProxy(source) - cbA = Mock(name='cbA') - cbB = Mock(name='cbB') - cbC = Mock(name='cbC') - p.__then__(cbA, p) - p.__then__(cbB, p) - self.assertFalse(p.__evaluated__()) - self.assertTrue(object.__getattribute__(p, '__pending__')) - - self.assertTrue(repr(p)) - self.assertTrue(p.__evaluated__()) - with self.assertRaises(AttributeError): - object.__getattribute__(p, '__pending__') - cbA.assert_called_with(p) - cbB.assert_called_with(p) - - self.assertTrue(p.__evaluated__()) - p.__then__(cbC, p) - cbC.assert_called_with(p) - - with self.assertRaises(AttributeError): - object.__getattribute__(p, '__pending__') - - def test_maybe_evaluate(self): - x = PromiseProxy(lambda: 30) - self.assertFalse(x.__evaluated__()) - 
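# A short sketch (not from the patch) of the lazy-proxy semantics the
# deleted test_Proxy/test_PromiseProxy classes cover, assuming Celery 3.1's
# celery.local module:
from celery.local import Proxy, PromiseProxy, maybe_evaluate

p = Proxy(lambda: 10)
assert p + 1 == 11                     # operators forward to the target
assert Proxy(lambda: 10) < Proxy(lambda: 20)

promise = PromiseProxy(lambda: 30)
assert not promise.__evaluated__()
assert maybe_evaluate(promise) == 30   # forces evaluation exactly once
assert promise.__evaluated__()         # the result is cached afterwards
assert maybe_evaluate(30) == 30        # plain values pass through unchanged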
self.assertEqual(maybe_evaluate(x), 30) - self.assertEqual(maybe_evaluate(x), 30) - - self.assertEqual(maybe_evaluate(30), 30) - self.assertTrue(x.__evaluated__()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py deleted file mode 100644 index e4fc965..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.mail import Message, Mailer, SSLError - -from celery.tests.case import Case, Mock, patch - - -msg = Message(to='george@vandelay.com', sender='elaine@pendant.com', - subject="What's up with Jerry?", body='???!') - - -class test_Message(Case): - - def test_repr(self): - self.assertTrue(repr(msg)) - - def test_str(self): - self.assertTrue(str(msg)) - - -class test_Mailer(Case): - - def test_send_wrapper(self): - mailer = Mailer() - mailer._send = Mock() - mailer.send(msg) - mailer._send.assert_called_with(msg) - - @patch('smtplib.SMTP_SSL', create=True) - def test_send_ssl_tls(self, SMTP_SSL): - mailer = Mailer(use_ssl=True, use_tls=True) - client = SMTP_SSL.return_value = Mock() - mailer._send(msg) - self.assertTrue(client.starttls.called) - self.assertEqual(client.ehlo.call_count, 2) - client.quit.assert_called_with() - client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) - mailer = Mailer(use_ssl=True, use_tls=True, user='foo', - password='bar') - mailer._send(msg) - client.login.assert_called_with('foo', 'bar') - - @patch('smtplib.SMTP') - def test_send(self, SMTP): - client = SMTP.return_value = Mock() - mailer = Mailer(use_ssl=False, use_tls=False) - mailer._send(msg) - - client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) - - client.quit.side_effect = SSLError() - mailer._send(msg) - client.close.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py deleted file mode 100644 index 6b65bb3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.serialization import pickle -from celery.tests.case import Case - - -class RegularException(Exception): - pass - - -class ArgOverrideException(Exception): - - def __init__(self, message, status_code=10): - self.status_code = status_code - Exception.__init__(self, message, status_code) - - -class test_Pickle(Case): - - def test_pickle_regular_exception(self): - exc = None - try: - raise RegularException('RegularException raised') - except RegularException as exc_: - exc = exc_ - - pickled = pickle.dumps({'exception': exc}) - unpickled = pickle.loads(pickled) - exception = unpickled.get('exception') - self.assertTrue(exception) - self.assertIsInstance(exception, RegularException) - self.assertTupleEqual(exception.args, ('RegularException raised', )) - - def test_pickle_arg_override_exception(self): - - exc = None - try: - raise ArgOverrideException( - 'ArgOverrideException raised', status_code=100, - ) - except ArgOverrideException as exc_: - exc = exc_ - - pickled = pickle.dumps({'exception': exc}) - unpickled = pickle.loads(pickled) - exception = unpickled.get('exception') - self.assertTrue(exception) - self.assertIsInstance(exception, ArgOverrideException) - self.assertTupleEqual(exception.args, ( - 'ArgOverrideException raised', 100)) - 
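# A plain-stdlib sketch (not from the patch) of the pitfall the surrounding
# pickle tests guard against: an exception with an extra constructor
# argument only survives a pickle round-trip if every argument is passed
# up to Exception.__init__ (the class name here is illustrative):
import pickle

class StatusError(Exception):
    def __init__(self, message, status_code=10):
        self.status_code = status_code
        # forwarding both values keeps self.args complete and pickleable
        Exception.__init__(self, message, status_code)

exc = pickle.loads(pickle.dumps(StatusError('boom', status_code=100)))
assert exc.args == ('boom', 100)
assert exc.status_code == 100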
self.assertEqual(exception.status_code, 100) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py deleted file mode 100644 index 4f2c584..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py +++ /dev/null @@ -1,713 +0,0 @@ -from __future__ import absolute_import - -import errno -import os -import sys -import signal -import tempfile - -from celery import _find_option_with_arg -from celery import platforms -from celery.five import open_fqdn -from celery.platforms import ( - get_fdmax, - ignore_errno, - set_process_title, - signals, - maybe_drop_privileges, - setuid, - setgid, - initgroups, - parse_uid, - parse_gid, - detached, - DaemonContext, - create_pidlock, - Pidfile, - LockFailed, - setgroups, - _setgroups_hack, - close_open_fds, - fd_by_path, -) - -try: - import resource -except ImportError: # pragma: no cover - resource = None # noqa - -from celery.tests.case import ( - Case, WhateverIO, Mock, SkipTest, - call, override_stdouts, mock_open, patch, -) - - -class test_find_option_with_arg(Case): - - def test_long_opt(self): - self.assertEqual( - _find_option_with_arg(['--foo=bar'], long_opts=['--foo']), - 'bar' - ) - - def test_short_opt(self): - self.assertEqual( - _find_option_with_arg(['-f', 'bar'], short_opts=['-f']), - 'bar' - ) - - -class test_fd_by_path(Case): - - def test_finds(self): - test_file = tempfile.NamedTemporaryFile() - keep = fd_by_path([test_file.name]) - self.assertEqual(keep, [test_file.file.fileno()]) - test_file.close() - - -class test_close_open_fds(Case): - - def test_closes(self): - with patch('os.close') as _close: - with patch('os.closerange', create=True) as closerange: - with patch('celery.platforms.get_fdmax') as fdmax: - fdmax.return_value = 3 - close_open_fds() - if not closerange.called: - _close.assert_has_calls([call(2), call(1), call(0)]) - _close.side_effect = OSError() - _close.side_effect.errno = errno.EBADF - close_open_fds() - - -class test_ignore_errno(Case): - - def test_raises_EBADF(self): - with ignore_errno('EBADF'): - exc = OSError() - exc.errno = errno.EBADF - raise exc - - def test_otherwise(self): - with self.assertRaises(OSError): - with ignore_errno('EBADF'): - exc = OSError() - exc.errno = errno.ENOENT - raise exc - - -class test_set_process_title(Case): - - def when_no_setps(self): - prev = platforms._setproctitle = platforms._setproctitle, None - try: - set_process_title('foo') - finally: - platforms._setproctitle = prev - - -class test_Signals(Case): - - @patch('signal.getsignal') - def test_getitem(self, getsignal): - signals['SIGINT'] - getsignal.assert_called_with(signal.SIGINT) - - def test_supported(self): - self.assertTrue(signals.supported('INT')) - self.assertFalse(signals.supported('SIGIMAGINARY')) - - def test_reset_alarm(self): - if sys.platform == 'win32': - raise SkipTest('signal.alarm not available on Windows') - with patch('signal.alarm') as _alarm: - signals.reset_alarm() - _alarm.assert_called_with(0) - - def test_arm_alarm(self): - if hasattr(signal, 'setitimer'): - with patch('signal.setitimer', create=True) as seti: - signals.arm_alarm(30) - self.assertTrue(seti.called) - - def test_signum(self): - self.assertEqual(signals.signum(13), 13) - self.assertEqual(signals.signum('INT'), signal.SIGINT) - self.assertEqual(signals.signum('SIGINT'), signal.SIGINT) - with self.assertRaises(TypeError): - signals.signum('int') - signals.signum(object()) - - 
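# A stdlib sketch (not from the patch) of the name-to-number translation
# that test_signum above asserts for celery.platforms.signals; this helper
# is hypothetical, not celery's implementation:
import signal

def signum(name):
    if isinstance(name, int):
        return name                     # already a signal number
    if not isinstance(name, str) or not name.isupper():
        raise TypeError('signal name must be an uppercase string')
    return getattr(signal, name if name.startswith('SIG') else 'SIG' + name)

previous = signal.signal(signum('INT'), lambda sig, frame: None)
signal.signal(signal.SIGINT, previous)  # restore the original handler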
@patch('signal.signal') - def test_ignore(self, set): - signals.ignore('SIGINT') - set.assert_called_with(signals.signum('INT'), signals.ignored) - signals.ignore('SIGTERM') - set.assert_called_with(signals.signum('TERM'), signals.ignored) - - @patch('signal.signal') - def test_setitem(self, set): - def handle(*a): - return a - signals['INT'] = handle - set.assert_called_with(signal.SIGINT, handle) - - @patch('signal.signal') - def test_setitem_raises(self, set): - set.side_effect = ValueError() - signals['INT'] = lambda *a: a - - -if not platforms.IS_WINDOWS: - - class test_get_fdmax(Case): - - @patch('resource.getrlimit') - def test_when_infinity(self, getrlimit): - with patch('os.sysconf') as sysconfig: - sysconfig.side_effect = KeyError() - getrlimit.return_value = [None, resource.RLIM_INFINITY] - default = object() - self.assertIs(get_fdmax(default), default) - - @patch('resource.getrlimit') - def test_when_actual(self, getrlimit): - with patch('os.sysconf') as sysconfig: - sysconfig.side_effect = KeyError() - getrlimit.return_value = [None, 13] - self.assertEqual(get_fdmax(None), 13) - - class test_maybe_drop_privileges(Case): - - @patch('celery.platforms.parse_uid') - @patch('pwd.getpwuid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.setuid') - @patch('celery.platforms.initgroups') - def test_with_uid(self, initgroups, setuid, setgid, - getpwuid, parse_uid): - - class pw_struct(object): - pw_gid = 50001 - - def raise_on_second_call(*args, **kwargs): - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EPERM - setuid.side_effect = raise_on_second_call - getpwuid.return_value = pw_struct() - parse_uid.return_value = 5001 - maybe_drop_privileges(uid='user') - parse_uid.assert_called_with('user') - getpwuid.assert_called_with(5001) - setgid.assert_called_with(50001) - initgroups.assert_called_with(5001, 50001) - setuid.assert_has_calls([call(5001), call(0)]) - - @patch('celery.platforms.parse_uid') - @patch('celery.platforms.parse_gid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.setuid') - @patch('celery.platforms.initgroups') - def test_with_guid(self, initgroups, setuid, setgid, - parse_gid, parse_uid): - - def raise_on_second_call(*args, **kwargs): - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EPERM - setuid.side_effect = raise_on_second_call - parse_uid.return_value = 5001 - parse_gid.return_value = 50001 - maybe_drop_privileges(uid='user', gid='group') - parse_uid.assert_called_with('user') - parse_gid.assert_called_with('group') - setgid.assert_called_with(50001) - initgroups.assert_called_with(5001, 50001) - setuid.assert_has_calls([call(5001), call(0)]) - - setuid.side_effect = None - with self.assertRaises(RuntimeError): - maybe_drop_privileges(uid='user', gid='group') - setuid.side_effect = OSError() - setuid.side_effect.errno = errno.EINVAL - with self.assertRaises(OSError): - maybe_drop_privileges(uid='user', gid='group') - - @patch('celery.platforms.setuid') - @patch('celery.platforms.setgid') - @patch('celery.platforms.parse_gid') - def test_only_gid(self, parse_gid, setgid, setuid): - parse_gid.return_value = 50001 - maybe_drop_privileges(gid='group') - parse_gid.assert_called_with('group') - setgid.assert_called_with(50001) - self.assertFalse(setuid.called) - - class test_setget_uid_gid(Case): - - @patch('celery.platforms.parse_uid') - @patch('os.setuid') - def test_setuid(self, _setuid, parse_uid): - parse_uid.return_value = 5001 - setuid('user') - parse_uid.assert_called_with('user') - 
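# A stdlib sketch (not from the patch) of the ordering the
# maybe_drop_privileges tests above verify: resolve ids first, drop the
# supplementary and primary groups, and only then the uid, because
# setgid()/initgroups() need the root privileges that setuid() discards.
# The function is hypothetical and must run as root to succeed:
import grp
import os
import pwd

def drop_privileges(username, groupname=None):
    uid = pwd.getpwnam(username).pw_uid
    gid = (grp.getgrnam(groupname).gr_gid if groupname
           else pwd.getpwnam(username).pw_gid)
    os.initgroups(username, gid)
    os.setgid(gid)
    os.setuid(uid)   # after this there is no way back to uid 0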
_setuid.assert_called_with(5001) - - @patch('celery.platforms.parse_gid') - @patch('os.setgid') - def test_setgid(self, _setgid, parse_gid): - parse_gid.return_value = 50001 - setgid('group') - parse_gid.assert_called_with('group') - _setgid.assert_called_with(50001) - - def test_parse_uid_when_int(self): - self.assertEqual(parse_uid(5001), 5001) - - @patch('pwd.getpwnam') - def test_parse_uid_when_existing_name(self, getpwnam): - - class pwent(object): - pw_uid = 5001 - - getpwnam.return_value = pwent() - self.assertEqual(parse_uid('user'), 5001) - - @patch('pwd.getpwnam') - def test_parse_uid_when_nonexisting_name(self, getpwnam): - getpwnam.side_effect = KeyError('user') - - with self.assertRaises(KeyError): - parse_uid('user') - - def test_parse_gid_when_int(self): - self.assertEqual(parse_gid(50001), 50001) - - @patch('grp.getgrnam') - def test_parse_gid_when_existing_name(self, getgrnam): - - class grent(object): - gr_gid = 50001 - - getgrnam.return_value = grent() - self.assertEqual(parse_gid('group'), 50001) - - @patch('grp.getgrnam') - def test_parse_gid_when_nonexisting_name(self, getgrnam): - getgrnam.side_effect = KeyError('group') - - with self.assertRaises(KeyError): - parse_gid('group') - - class test_initgroups(Case): - - @patch('pwd.getpwuid') - @patch('os.initgroups', create=True) - def test_with_initgroups(self, initgroups_, getpwuid): - getpwuid.return_value = ['user'] - initgroups(5001, 50001) - initgroups_.assert_called_with('user', 50001) - - @patch('celery.platforms.setgroups') - @patch('grp.getgrall') - @patch('pwd.getpwuid') - def test_without_initgroups(self, getpwuid, getgrall, setgroups): - prev = getattr(os, 'initgroups', None) - try: - delattr(os, 'initgroups') - except AttributeError: - pass - try: - getpwuid.return_value = ['user'] - - class grent(object): - gr_mem = ['user'] - - def __init__(self, gid): - self.gr_gid = gid - - getgrall.return_value = [grent(1), grent(2), grent(3)] - initgroups(5001, 50001) - setgroups.assert_called_with([1, 2, 3]) - finally: - if prev: - os.initgroups = prev - - class test_detached(Case): - - def test_without_resource(self): - prev, platforms.resource = platforms.resource, None - try: - with self.assertRaises(RuntimeError): - detached() - finally: - platforms.resource = prev - - @patch('celery.platforms._create_pidlock') - @patch('celery.platforms.signals') - @patch('celery.platforms.maybe_drop_privileges') - @patch('os.geteuid') - @patch(open_fqdn) - def test_default(self, open, geteuid, maybe_drop, - signals, pidlock): - geteuid.return_value = 0 - context = detached(uid='user', gid='group') - self.assertIsInstance(context, DaemonContext) - signals.reset.assert_called_with('SIGCLD') - maybe_drop.assert_called_with(uid='user', gid='group') - open.return_value = Mock() - - geteuid.return_value = 5001 - context = detached(uid='user', gid='group', logfile='/foo/bar') - self.assertIsInstance(context, DaemonContext) - self.assertTrue(context.after_chdir) - context.after_chdir() - open.assert_called_with('/foo/bar', 'a') - open.return_value.close.assert_called_with() - - context = detached(pidfile='/foo/bar/pid') - self.assertIsInstance(context, DaemonContext) - self.assertTrue(context.after_chdir) - context.after_chdir() - pidlock.assert_called_with('/foo/bar/pid') - - class test_DaemonContext(Case): - - @patch('os.fork') - @patch('os.setsid') - @patch('os._exit') - @patch('os.chdir') - @patch('os.umask') - @patch('os.close') - @patch('os.closerange') - @patch('os.open') - @patch('os.dup2') - def test_open(self, dup2, open, 
close, closer, umask, chdir, - _exit, setsid, fork): - x = DaemonContext(workdir='/opt/workdir', umask=0o22) - x.stdfds = [0, 1, 2] - - fork.return_value = 0 - with x: - self.assertTrue(x._is_open) - with x: - pass - self.assertEqual(fork.call_count, 2) - setsid.assert_called_with() - self.assertFalse(_exit.called) - - chdir.assert_called_with(x.workdir) - umask.assert_called_with(0o22) - self.assertTrue(dup2.called) - - fork.reset_mock() - fork.return_value = 1 - x = DaemonContext(workdir='/opt/workdir') - x.stdfds = [0, 1, 2] - with x: - pass - self.assertEqual(fork.call_count, 1) - _exit.assert_called_with(0) - - x = DaemonContext(workdir='/opt/workdir', fake=True) - x.stdfds = [0, 1, 2] - x._detach = Mock() - with x: - pass - self.assertFalse(x._detach.called) - - x.after_chdir = Mock() - with x: - pass - x.after_chdir.assert_called_with() - - class test_Pidfile(Case): - - @patch('celery.platforms.Pidfile') - def test_create_pidlock(self, Pidfile): - p = Pidfile.return_value = Mock() - p.is_locked.return_value = True - p.remove_if_stale.return_value = False - with override_stdouts() as (_, err): - with self.assertRaises(SystemExit): - create_pidlock('/var/pid') - self.assertIn('already exists', err.getvalue()) - - p.remove_if_stale.return_value = True - ret = create_pidlock('/var/pid') - self.assertIs(ret, p) - - def test_context(self): - p = Pidfile('/var/pid') - p.write_pid = Mock() - p.remove = Mock() - - with p as _p: - self.assertIs(_p, p) - p.write_pid.assert_called_with() - p.remove.assert_called_with() - - def test_acquire_raises_LockFailed(self): - p = Pidfile('/var/pid') - p.write_pid = Mock() - p.write_pid.side_effect = OSError() - - with self.assertRaises(LockFailed): - with p: - pass - - @patch('os.path.exists') - def test_is_locked(self, exists): - p = Pidfile('/var/pid') - exists.return_value = True - self.assertTrue(p.is_locked()) - exists.return_value = False - self.assertFalse(p.is_locked()) - - def test_read_pid(self): - with mock_open() as s: - s.write('1816\n') - s.seek(0) - p = Pidfile('/var/pid') - self.assertEqual(p.read_pid(), 1816) - - def test_read_pid_partially_written(self): - with mock_open() as s: - s.write('1816') - s.seek(0) - p = Pidfile('/var/pid') - with self.assertRaises(ValueError): - p.read_pid() - - def test_read_pid_raises_ENOENT(self): - exc = IOError() - exc.errno = errno.ENOENT - with mock_open(side_effect=exc): - p = Pidfile('/var/pid') - self.assertIsNone(p.read_pid()) - - def test_read_pid_raises_IOError(self): - exc = IOError() - exc.errno = errno.EAGAIN - with mock_open(side_effect=exc): - p = Pidfile('/var/pid') - with self.assertRaises(IOError): - p.read_pid() - - def test_read_pid_bogus_pidfile(self): - with mock_open() as s: - s.write('eighteensixteen\n') - s.seek(0) - p = Pidfile('/var/pid') - with self.assertRaises(ValueError): - p.read_pid() - - @patch('os.unlink') - def test_remove(self, unlink): - unlink.return_value = True - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_ENOENT(self, unlink): - exc = OSError() - exc.errno = errno.ENOENT - unlink.side_effect = exc - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_EACCES(self, unlink): - exc = OSError() - exc.errno = errno.EACCES - unlink.side_effect = exc - p = Pidfile('/var/pid') - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.unlink') - def test_remove_OSError(self, unlink): - exc = OSError() - exc.errno = errno.EAGAIN - 
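# A stdlib sketch (not from the patch) of the liveness probe behind the
# remove_if_stale tests further down: kill() with signal 0 performs error
# checking only, so ESRCH means the recorded pid no longer exists:
import errno
import os

def pid_is_stale(pid):
    try:
        os.kill(pid, 0)   # no signal is actually delivered
    except OSError as exc:
        return exc.errno == errno.ESRCH   # no such process -> stale pidfile
    return False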
unlink.side_effect = exc - p = Pidfile('/var/pid') - with self.assertRaises(OSError): - p.remove() - unlink.assert_called_with(p.path) - - @patch('os.kill') - def test_remove_if_stale_process_alive(self, kill): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = 1816 - kill.return_value = 0 - self.assertFalse(p.remove_if_stale()) - kill.assert_called_with(1816, 0) - p.read_pid.assert_called_with() - - kill.side_effect = OSError() - kill.side_effect.errno = errno.ENOENT - self.assertFalse(p.remove_if_stale()) - - @patch('os.kill') - def test_remove_if_stale_process_dead(self, kill): - with override_stdouts(): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = 1816 - p.remove = Mock() - exc = OSError() - exc.errno = errno.ESRCH - kill.side_effect = exc - self.assertTrue(p.remove_if_stale()) - kill.assert_called_with(1816, 0) - p.remove.assert_called_with() - - def test_remove_if_stale_broken_pid(self): - with override_stdouts(): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.side_effect = ValueError() - p.remove = Mock() - - self.assertTrue(p.remove_if_stale()) - p.remove.assert_called_with() - - def test_remove_if_stale_no_pidfile(self): - p = Pidfile('/var/pid') - p.read_pid = Mock() - p.read_pid.return_value = None - p.remove = Mock() - - self.assertTrue(p.remove_if_stale()) - p.remove.assert_called_with() - - @patch('os.fsync') - @patch('os.getpid') - @patch('os.open') - @patch('os.fdopen') - @patch(open_fqdn) - def test_write_pid(self, open_, fdopen, osopen, getpid, fsync): - getpid.return_value = 1816 - osopen.return_value = 13 - w = fdopen.return_value = WhateverIO() - w.close = Mock() - r = open_.return_value = WhateverIO() - r.write('1816\n') - r.seek(0) - - p = Pidfile('/var/pid') - p.write_pid() - w.seek(0) - self.assertEqual(w.readline(), '1816\n') - self.assertTrue(w.close.called) - getpid.assert_called_with() - osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS, - platforms.PIDFILE_MODE) - fdopen.assert_called_with(13, 'w') - fsync.assert_called_with(13) - open_.assert_called_with(p.path) - - @patch('os.fsync') - @patch('os.getpid') - @patch('os.open') - @patch('os.fdopen') - @patch(open_fqdn) - def test_write_reread_fails(self, open_, fdopen, - osopen, getpid, fsync): - getpid.return_value = 1816 - osopen.return_value = 13 - w = fdopen.return_value = WhateverIO() - w.close = Mock() - r = open_.return_value = WhateverIO() - r.write('11816\n') - r.seek(0) - - p = Pidfile('/var/pid') - with self.assertRaises(LockFailed): - p.write_pid() - - class test_setgroups(Case): - - @patch('os.setgroups', create=True) - def test_setgroups_hack_ValueError(self, setgroups): - - def on_setgroups(groups): - if len(groups) <= 200: - setgroups.return_value = True - return - raise ValueError() - setgroups.side_effect = on_setgroups - _setgroups_hack(list(range(400))) - - setgroups.side_effect = ValueError() - with self.assertRaises(ValueError): - _setgroups_hack(list(range(400))) - - @patch('os.setgroups', create=True) - def test_setgroups_hack_OSError(self, setgroups): - exc = OSError() - exc.errno = errno.EINVAL - - def on_setgroups(groups): - if len(groups) <= 200: - setgroups.return_value = True - return - raise exc - setgroups.side_effect = on_setgroups - - _setgroups_hack(list(range(400))) - - setgroups.side_effect = exc - with self.assertRaises(OSError): - _setgroups_hack(list(range(400))) - - exc2 = OSError() - exc.errno = errno.ESRCH - setgroups.side_effect = exc2 - with self.assertRaises(OSError): - 
_setgroups_hack(list(range(400))) - - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups(self, hack, sysconf): - sysconf.return_value = 100 - setgroups(list(range(400))) - hack.assert_called_with(list(range(100))) - - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_sysconf_raises(self, hack, sysconf): - sysconf.side_effect = ValueError() - setgroups(list(range(400))) - hack.assert_called_with(list(range(400))) - - @patch('os.getgroups') - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups): - sysconf.side_effect = ValueError() - esrch = OSError() - esrch.errno = errno.ESRCH - hack.side_effect = esrch - with self.assertRaises(OSError): - setgroups(list(range(400))) - - @patch('os.getgroups') - @patch('os.sysconf') - @patch('celery.platforms._setgroups_hack') - def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups): - sysconf.side_effect = ValueError() - eperm = OSError() - eperm.errno = errno.EPERM - hack.side_effect = eperm - getgroups.return_value = list(range(400)) - setgroups(list(range(400))) - getgroups.assert_called_with() - - getgroups.return_value = [1000] - with self.assertRaises(OSError): - setgroups(list(range(400))) - getgroups.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py deleted file mode 100644 index 9c18d71..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import absolute_import - -from celery.five import range -from celery.utils.dispatch.saferef import safe_ref -from celery.tests.case import Case - - -class Class1(object): - - def x(self): - pass - - -def fun(obj): - pass - - -class Class2(object): - - def __call__(self, obj): - pass - - -class SaferefTests(Case): - - def setUp(self): - ts = [] - ss = [] - for x in range(5000): - t = Class1() - ts.append(t) - s = safe_ref(t.x, self._closure) - ss.append(s) - ts.append(fun) - ss.append(safe_ref(fun, self._closure)) - for x in range(30): - t = Class2() - ts.append(t) - s = safe_ref(t, self._closure) - ss.append(s) - self.ts = ts - self.ss = ss - self.closureCount = 0 - - def tearDown(self): - del self.ts - del self.ss - - def test_in(self): - """test_in - - Test the "in" operator for safe references (cmp) - - """ - for t in self.ts[:50]: - self.assertTrue(safe_ref(t.x) in self.ss) - - def test_valid(self): - """test_value - - Test that the references are valid (return instance methods) - - """ - for s in self.ss: - self.assertTrue(s()) - - def test_shortcircuit(self): - """test_shortcircuit - - Test that creation short-circuits to reuse existing references - - """ - sd = {} - for s in self.ss: - sd[s] = 1 - for t in self.ts: - if hasattr(t, 'x'): - self.assertIn(safe_ref(t.x), sd) - else: - self.assertIn(safe_ref(t), sd) - - def test_representation(self): - """test_representation - - Test that the reference object's representation works - - XXX Doesn't currently check the results, just that no error - is raised - """ - repr(self.ss[-1]) - - def _closure(self, ref): - """Dumb utility mechanism to increment deletion counter""" - self.closureCount += 1 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py deleted file mode 100644 
index 53dfdad..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import - -import sys - -from celery.utils.serialization import ( - UnpickleableExceptionWrapper, - get_pickleable_etype, -) - -from celery.tests.case import Case, mask_modules - - -class test_AAPickle(Case): - - def test_no_cpickle(self): - prev = sys.modules.pop('celery.utils.serialization', None) - try: - with mask_modules('cPickle'): - from celery.utils.serialization import pickle - import pickle as orig_pickle - self.assertIs(pickle.dumps, orig_pickle.dumps) - finally: - sys.modules['celery.utils.serialization'] = prev - - -class test_UnpickleExceptionWrapper(Case): - - def test_init(self): - x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x]) - self.assertTrue(x.exc_args) - self.assertEqual(len(x.exc_args), 2) - - -class test_get_pickleable_etype(Case): - - def test_get_pickleable_etype(self): - - class Unpickleable(Exception): - def __reduce__(self): - raise ValueError('foo') - - self.assertIs(get_pickleable_etype(Unpickleable), Exception) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py deleted file mode 100644 index 4cd32c7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import absolute_import - -import os - -from celery.utils.sysinfo import load_average, df - -from celery.tests.case import Case, SkipTest, patch - - -class test_load_average(Case): - - def test_avg(self): - if not hasattr(os, 'getloadavg'): - raise SkipTest('getloadavg not available') - with patch('os.getloadavg') as getloadavg: - getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875 - l = load_average() - self.assertTrue(l) - self.assertEqual(l, (0.55, 0.64, 0.7)) - - -class test_df(Case): - - def test_df(self): - try: - from posix import statvfs_result # noqa - except ImportError: - raise SkipTest('statvfs not available') - x = df('/') - self.assertTrue(x.total_blocks) - self.assertTrue(x.available) - self.assertTrue(x.capacity) - self.assertTrue(x.stat) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py deleted file mode 100644 index 1bd7e43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, unicode_literals - -import sys - -from celery.utils import term -from celery.utils.term import colored, fg -from celery.five import text_t - -from celery.tests.case import Case, SkipTest - - -class test_colored(Case): - - def setUp(self): - if sys.platform == 'win32': - raise SkipTest('Colors not supported on Windows') - - self._prev_encoding = sys.getdefaultencoding - - def getdefaultencoding(): - return 'utf-8' - - sys.getdefaultencoding = getdefaultencoding - - def tearDown(self): - sys.getdefaultencoding = self._prev_encoding - - def test_colors(self): - colors = ( - ('black', term.BLACK), - ('red', term.RED), - ('green', term.GREEN), - ('yellow', term.YELLOW), - ('blue', term.BLUE), - ('magenta', term.MAGENTA), - ('cyan', term.CYAN), - ('white', term.WHITE), - ) - - for name, key in colors: - self.assertIn(fg(30 + key), str(colored().names[name]('foo'))) - - self.assertTrue(str(colored().bold('f'))) 
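# A plain sketch (not from the patch) of the ANSI SGR escape sequences
# celery.utils.term composes; the '\x1b[1;31m' literal also appears in the
# expected values further down (31 = red, 1 = bold, 0 = reset):
RED, RESET = '\x1b[1;31m', '\x1b[0m'

def red(text):
    return RED + text + RESET

print(red('failed'))   # renders in bold red on an ANSI terminal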
- self.assertTrue(str(colored().underline('f'))) - self.assertTrue(str(colored().blink('f'))) - self.assertTrue(str(colored().reverse('f'))) - self.assertTrue(str(colored().bright('f'))) - self.assertTrue(str(colored().ired('f'))) - self.assertTrue(str(colored().igreen('f'))) - self.assertTrue(str(colored().iyellow('f'))) - self.assertTrue(str(colored().iblue('f'))) - self.assertTrue(str(colored().imagenta('f'))) - self.assertTrue(str(colored().icyan('f'))) - self.assertTrue(str(colored().iwhite('f'))) - self.assertTrue(str(colored().reset('f'))) - - self.assertTrue(text_t(colored().green('∂bar'))) - - self.assertTrue( - colored().red('éefoo') + colored().green('∂bar')) - - self.assertEqual( - colored().red('foo').no_color(), 'foo') - - self.assertTrue( - repr(colored().blue('åfoo'))) - - self.assertIn("''", repr(colored())) - - c = colored() - s = c.red('foo', c.blue('bar'), c.green('baz')) - self.assertTrue(s.no_color()) - - c._fold_no_color(s, 'øfoo') - c._fold_no_color('fooå', s) - - c = colored().red('åfoo') - self.assertEqual( - c._add(c, 'baræ'), - '\x1b[1;31m\xe5foo\x1b[0mbar\xe6', - ) - - c2 = colored().blue('ƒƒz') - c3 = c._add(c, c2) - self.assertEqual( - c3, - '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m', - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py deleted file mode 100644 index 383bdb6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.text import ( - indent, - ensure_2lines, - abbr, - truncate, - abbrtask, - pretty, -) -from celery.tests.case import AppCase, Case - -RANDTEXT = """\ -The quick brown -fox jumps -over the -lazy dog\ -""" - -RANDTEXT_RES = """\ - The quick brown - fox jumps - over the - lazy dog\ -""" - -QUEUES = { - 'queue1': { - 'exchange': 'exchange1', - 'exchange_type': 'type1', - 'routing_key': 'bind1', - }, - 'queue2': { - 'exchange': 'exchange2', - 'exchange_type': 'type2', - 'routing_key': 'bind2', - }, -} - - -QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1' -QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) key=bind2' - - -class test_Info(AppCase): - - def test_textindent(self): - self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES) - - def test_format_queues(self): - self.app.amqp.queues = self.app.amqp.Queues(QUEUES) - self.assertEqual(sorted(self.app.amqp.queues.format().split('\n')), - sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) - - def test_ensure_2lines(self): - self.assertEqual( - len(ensure_2lines('foo\nbar\nbaz\n').splitlines()), 3, - ) - self.assertEqual( - len(ensure_2lines('foo\nbar').splitlines()), 2, - ) - - -class test_utils(Case): - - def test_truncate_text(self): - self.assertEqual(truncate('ABCDEFGHI', 3), 'ABC...') - self.assertEqual(truncate('ABCDEFGHI', 10), 'ABCDEFGHI') - - def test_abbr(self): - self.assertEqual(abbr(None, 3), '???') - self.assertEqual(abbr('ABCDEFGHI', 6), 'ABC...') - self.assertEqual(abbr('ABCDEFGHI', 20), 'ABCDEFGHI') - self.assertEqual(abbr('ABCDEFGHI', 6, None), 'ABCDEF') - - def test_abbrtask(self): - self.assertEqual(abbrtask(None, 3), '???') - self.assertEqual( - abbrtask('feeds.tasks.refresh', 10), - '[.]refresh', - ) - self.assertEqual( - abbrtask('feeds.tasks.refresh', 30), - 'feeds.tasks.refresh', - ) - - def test_pretty(self): - self.assertTrue(pretty(('a', 'b', 'c'))) diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py deleted file mode 100644 index b7f9c43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import absolute_import - -from celery.utils.threads import ( - _LocalStack, - _FastLocalStack, - LocalManager, - Local, - bgThread, -) - -from celery.tests.case import Case, override_stdouts, patch - - -class test_bgThread(Case): - - def test_crash(self): - - class T(bgThread): - - def body(self): - raise KeyError() - - with patch('os._exit') as _exit: - with override_stdouts(): - _exit.side_effect = ValueError() - t = T() - with self.assertRaises(ValueError): - t.run() - _exit.assert_called_with(1) - - def test_interface(self): - x = bgThread() - with self.assertRaises(NotImplementedError): - x.body() - - -class test_Local(Case): - - def test_iter(self): - x = Local() - x.foo = 'bar' - ident = x.__ident_func__() - self.assertIn((ident, {'foo': 'bar'}), list(iter(x))) - - delattr(x, 'foo') - self.assertNotIn((ident, {'foo': 'bar'}), list(iter(x))) - with self.assertRaises(AttributeError): - delattr(x, 'foo') - - self.assertIsNotNone(x(lambda: 'foo')) - - -class test_LocalStack(Case): - - def test_stack(self): - x = _LocalStack() - self.assertIsNone(x.pop()) - x.__release_local__() - ident = x.__ident_func__ - x.__ident_func__ = ident - - with self.assertRaises(RuntimeError): - x()[0] - - x.push(['foo']) - self.assertEqual(x()[0], 'foo') - x.pop() - with self.assertRaises(RuntimeError): - x()[0] - - -class test_FastLocalStack(Case): - - def test_stack(self): - x = _FastLocalStack() - x.push(['foo']) - x.push(['bar']) - self.assertEqual(x.top, ['bar']) - self.assertEqual(len(x), 2) - x.pop() - self.assertEqual(x.top, ['foo']) - x.pop() - self.assertIsNone(x.top) - - -class test_LocalManager(Case): - - def test_init(self): - x = LocalManager() - self.assertListEqual(x.locals, []) - self.assertTrue(x.ident_func) - - def ident(): - return 1 - - loc = Local() - x = LocalManager([loc], ident_func=ident) - self.assertListEqual(x.locals, [loc]) - x = LocalManager(loc, ident_func=ident) - self.assertListEqual(x.locals, [loc]) - self.assertIs(x.ident_func, ident) - self.assertIs(x.locals[0].__ident_func__, ident) - self.assertEqual(x.get_ident(), 1) - - with patch('celery.utils.threads.release_local') as release: - x.cleanup() - release.assert_called_with(loc) - - self.assertTrue(repr(x)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py deleted file mode 100644 index cb18c21..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import absolute_import - -import sys -import time - -import celery.utils.timer2 as timer2 - -from celery.tests.case import Case, Mock, patch -from kombu.tests.case import redirect_stdouts - - -class test_Entry(Case): - - def test_call(self): - scratch = [None] - - def timed(x, y, moo='foo'): - scratch[0] = (x, y, moo) - - tref = timer2.Entry(timed, (4, 4), {'moo': 'baz'}) - tref() - - self.assertTupleEqual(scratch[0], (4, 4, 'baz')) - - def test_cancel(self): - tref = timer2.Entry(lambda x: x, (1, ), {}) - tref.cancel() - self.assertTrue(tref.cancelled) - - def test_repr(self): - tref = timer2.Entry(lambda x: x(1, ), {}) - self.assertTrue(repr(tref)) - - -class 
test_Schedule(Case): - - def test_supports_Timer_interface(self): - x = timer2.Schedule() - x.stop() - - tref = Mock() - x.cancel(tref) - tref.cancel.assert_called_with() - - self.assertIs(x.schedule, x) - - def test_handle_error(self): - from datetime import datetime - scratch = [None] - - def on_error(exc_info): - scratch[0] = exc_info - - s = timer2.Schedule(on_error=on_error) - - with patch('kombu.async.timer.to_timestamp') as tot: - tot.side_effect = OverflowError() - s.enter_at(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - s.enter_at(timer2.Entry(lambda: None, (), {}), eta=None) - s.on_error = None - with self.assertRaises(OverflowError): - s.enter_at(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - exc = scratch[0] - self.assertIsInstance(exc, OverflowError) - - -class test_Timer(Case): - - def test_enter_after(self): - t = timer2.Timer() - try: - done = [False] - - def set_done(): - done[0] = True - - t.call_after(0.3, set_done) - mss = 0 - while not done[0]: - if mss >= 2.0: - raise Exception('test timed out') - time.sleep(0.1) - mss += 0.1 - finally: - t.stop() - - def test_exit_after(self): - t = timer2.Timer() - t.call_after = Mock() - t.exit_after(0.3, priority=10) - t.call_after.assert_called_with(0.3, sys.exit, 10) - - def test_ensure_started_not_started(self): - t = timer2.Timer() - t.running = True - t.start = Mock() - t.ensure_started() - self.assertFalse(t.start.called) - - def test_call_repeatedly(self): - t = timer2.Timer() - try: - t.schedule.enter_after = Mock() - - myfun = Mock() - myfun.__name__ = 'myfun' - t.call_repeatedly(0.03, myfun) - - self.assertEqual(t.schedule.enter_after.call_count, 1) - args1, _ = t.schedule.enter_after.call_args_list[0] - sec1, tref1, _ = args1 - self.assertEqual(sec1, 0.03) - tref1() - - self.assertEqual(t.schedule.enter_after.call_count, 2) - args2, _ = t.schedule.enter_after.call_args_list[1] - sec2, tref2, _ = args2 - self.assertEqual(sec2, 0.03) - tref2.cancelled = True - tref2() - - self.assertEqual(t.schedule.enter_after.call_count, 2) - finally: - t.stop() - - @patch('kombu.async.timer.logger') - def test_apply_entry_error_handled(self, logger): - t = timer2.Timer() - t.schedule.on_error = None - - fun = Mock() - fun.side_effect = ValueError() - - t.schedule.apply_entry(fun) - self.assertTrue(logger.error.called) - - @redirect_stdouts - def test_apply_entry_error_not_handled(self, stdout, stderr): - t = timer2.Timer() - t.schedule.on_error = Mock() - - fun = Mock() - fun.side_effect = ValueError() - t.schedule.apply_entry(fun) - fun.assert_called_with() - self.assertFalse(stderr.getvalue()) - - @patch('os._exit') - def test_thread_crash(self, _exit): - t = timer2.Timer() - t._next_entry = Mock() - t._next_entry.side_effect = OSError(131) - t.run() - _exit.assert_called_with(1) - - def test_gc_race_lost(self): - t = timer2.Timer() - t._is_stopped.set = Mock() - t._is_stopped.set.side_effect = TypeError() - - t._is_shutdown.set() - t.run() - t._is_stopped.set.assert_called_with() - - def test_to_timestamp(self): - self.assertIs(timer2.to_timestamp(3.13), 3.13) - - def test_test_enter(self): - t = timer2.Timer() - t._do_enter = Mock() - e = Mock() - t.enter(e, 13, 0) - t._do_enter.assert_called_with('enter_at', e, 13, priority=0) - - def test_test_enter_after(self): - t = timer2.Timer() - t._do_enter = Mock() - t.enter_after() - t._do_enter.assert_called_with('enter_after') - - def test_cancel(self): - t = timer2.Timer() - tref = Mock() - t.cancel(tref) - tref.cancel.assert_called_with() diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py deleted file mode 100644 index 2258d06..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py +++ /dev/null @@ -1,267 +0,0 @@ -from __future__ import absolute_import - -import pytz - -from datetime import datetime, timedelta, tzinfo -from pytz import AmbiguousTimeError - -from celery.utils.timeutils import ( - delta_resolution, - humanize_seconds, - maybe_iso8601, - maybe_timedelta, - timedelta_seconds, - timezone, - rate, - remaining, - make_aware, - maybe_make_aware, - localize, - LocalTimezone, - ffwd, - utcoffset, -) -from celery.utils.iso8601 import parse_iso8601 -from celery.tests.case import Case, Mock, patch - - -class test_LocalTimezone(Case): - - def test_daylight(self): - with patch('celery.utils.timeutils._time') as time: - time.timezone = 3600 - time.daylight = False - x = LocalTimezone() - self.assertEqual(x.STDOFFSET, timedelta(seconds=-3600)) - self.assertEqual(x.DSTOFFSET, x.STDOFFSET) - time.daylight = True - time.altzone = 3600 - y = LocalTimezone() - self.assertEqual(y.STDOFFSET, timedelta(seconds=-3600)) - self.assertEqual(y.DSTOFFSET, timedelta(seconds=-3600)) - - self.assertTrue(repr(y)) - - y._isdst = Mock() - y._isdst.return_value = True - self.assertTrue(y.utcoffset(datetime.now())) - self.assertFalse(y.dst(datetime.now())) - y._isdst.return_value = False - self.assertTrue(y.utcoffset(datetime.now())) - self.assertFalse(y.dst(datetime.now())) - - self.assertTrue(y.tzname(datetime.now())) - - -class test_iso8601(Case): - - def test_parse_with_timezone(self): - d = datetime.utcnow().replace(tzinfo=pytz.utc) - self.assertEqual(parse_iso8601(d.isoformat()), d) - # 2013-06-07T20:12:51.775877+00:00 - iso = d.isoformat() - iso1 = iso.replace('+00:00', '-01:00') - d1 = parse_iso8601(iso1) - self.assertEqual(d1.tzinfo._minutes, -60) - iso2 = iso.replace('+00:00', '+01:00') - d2 = parse_iso8601(iso2) - self.assertEqual(d2.tzinfo._minutes, +60) - iso3 = iso.replace('+00:00', 'Z') - d3 = parse_iso8601(iso3) - self.assertEqual(d3.tzinfo, pytz.UTC) - - -class test_timeutils(Case): - - def test_delta_resolution(self): - D = delta_resolution - dt = datetime(2010, 3, 30, 11, 50, 58, 41065) - deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)), - (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), - (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), - (timedelta(seconds=2), dt)) - for delta, shoulda in deltamap: - self.assertEqual(D(dt, delta), shoulda) - - def test_timedelta_seconds(self): - deltamap = ((timedelta(seconds=1), 1), - (timedelta(seconds=27), 27), - (timedelta(minutes=3), 3 * 60), - (timedelta(hours=4), 4 * 60 * 60), - (timedelta(days=3), 3 * 86400)) - for delta, seconds in deltamap: - self.assertEqual(timedelta_seconds(delta), seconds) - - def test_timedelta_seconds_returns_0_on_negative_time(self): - delta = timedelta(days=-2) - self.assertEqual(timedelta_seconds(delta), 0) - - def test_humanize_seconds(self): - t = ((4 * 60 * 60 * 24, '4.00 days'), - (1 * 60 * 60 * 24, '1.00 day'), - (4 * 60 * 60, '4.00 hours'), - (1 * 60 * 60, '1.00 hour'), - (4 * 60, '4.00 minutes'), - (1 * 60, '1.00 minute'), - (4, '4.00 seconds'), - (1, '1.00 second'), - (4.3567631221, '4.36 seconds'), - (0, 'now')) - - for seconds, human in t: - self.assertEqual(humanize_seconds(seconds), human) - - self.assertEqual(humanize_seconds(4, prefix='about '), - 'about 4.00 seconds') - - def 
test_maybe_iso8601_datetime(self): - now = datetime.now() - self.assertIs(maybe_iso8601(now), now) - - def test_maybe_timedelta(self): - D = maybe_timedelta - - for i in (30, 30.6): - self.assertEqual(D(i), timedelta(seconds=i)) - - self.assertEqual(D(timedelta(days=2)), timedelta(days=2)) - - def test_remaining_relative(self): - remaining(datetime.utcnow(), timedelta(hours=1), relative=True) - - -class test_timezone(Case): - - def test_get_timezone_with_pytz(self): - self.assertTrue(timezone.get_timezone('UTC')) - - def test_tz_or_local(self): - self.assertEqual(timezone.tz_or_local(), timezone.local) - self.assertTrue(timezone.tz_or_local(timezone.utc)) - - def test_to_local(self): - self.assertTrue( - timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)), - ) - self.assertTrue( - timezone.to_local(datetime.utcnow()) - ) - - def test_to_local_fallback(self): - self.assertTrue( - timezone.to_local_fallback( - make_aware(datetime.utcnow(), timezone.utc)), - ) - self.assertTrue( - timezone.to_local_fallback(datetime.utcnow()) - ) - - -class test_make_aware(Case): - - def test_tz_without_localize(self): - tz = tzinfo() - self.assertFalse(hasattr(tz, 'localize')) - wtz = make_aware(datetime.utcnow(), tz) - self.assertEqual(wtz.tzinfo, tz) - - def test_when_has_localize(self): - - class tzz(tzinfo): - raises = False - - def localize(self, dt, is_dst=None): - self.localized = True - if self.raises and is_dst is None: - self.raised = True - raise AmbiguousTimeError() - return 1 # needed by min() in Python 3 (None not hashable) - - tz = tzz() - make_aware(datetime.utcnow(), tz) - self.assertTrue(tz.localized) - - tz2 = tzz() - tz2.raises = True - make_aware(datetime.utcnow(), tz2) - self.assertTrue(tz2.localized) - self.assertTrue(tz2.raised) - - def test_maybe_make_aware(self): - aware = datetime.utcnow().replace(tzinfo=timezone.utc) - self.assertTrue(maybe_make_aware(aware), timezone.utc) - naive = datetime.utcnow() - self.assertTrue(maybe_make_aware(naive)) - - -class test_localize(Case): - - def test_tz_without_normalize(self): - tz = tzinfo() - self.assertFalse(hasattr(tz, 'normalize')) - self.assertTrue(localize(make_aware(datetime.utcnow(), tz), tz)) - - def test_when_has_normalize(self): - - class tzz(tzinfo): - raises = None - - def normalize(self, dt, **kwargs): - self.normalized = True - if self.raises and kwargs and kwargs.get('is_dst') is None: - self.raised = True - raise self.raises - return 1 # needed by min() in Python 3 (None not hashable) - - tz = tzz() - localize(make_aware(datetime.utcnow(), tz), tz) - self.assertTrue(tz.normalized) - - tz2 = tzz() - tz2.raises = AmbiguousTimeError() - localize(make_aware(datetime.utcnow(), tz2), tz2) - self.assertTrue(tz2.normalized) - self.assertTrue(tz2.raised) - - tz3 = tzz() - tz3.raises = TypeError() - localize(make_aware(datetime.utcnow(), tz3), tz3) - self.assertTrue(tz3.normalized) - self.assertTrue(tz3.raised) - - -class test_rate_limit_string(Case): - - def test_conversion(self): - self.assertEqual(rate(999), 999) - self.assertEqual(rate(7.5), 7.5) - self.assertEqual(rate('2.5/s'), 2.5) - self.assertEqual(rate('1456/s'), 1456) - self.assertEqual(rate('100/m'), - 100 / 60.0) - self.assertEqual(rate('10/h'), - 10 / 60.0 / 60.0) - - for zero in (0, None, '0', '0/m', '0/h', '0/s', '0.0/s'): - self.assertEqual(rate(zero), 0) - - -class test_ffwd(Case): - - def test_repr(self): - x = ffwd(year=2012) - self.assertTrue(repr(x)) - - def test_radd_with_unknown_gives_NotImplemented(self): - x = ffwd(year=2012) - 
self.assertEqual(x.__radd__(object()), NotImplemented) - - -class test_utcoffset(Case): - - def test_utcoffset(self): - with patch('celery.utils.timeutils._time') as _time: - _time.daylight = True - self.assertIsNotNone(utcoffset()) - _time.daylight = False - self.assertIsNotNone(utcoffset()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py deleted file mode 100644 index 2837ad6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import absolute_import - -import pytz - -from datetime import datetime, date, time, timedelta - -from kombu import Queue - -from celery.utils import ( - chunks, - is_iterable, - cached_property, - warn_deprecated, - worker_direct, - gen_task_name, - jsonify, -) -from celery.tests.case import Case, Mock, patch - - -def double(x): - return x * 2 - - -class test_worker_direct(Case): - - def test_returns_if_queue(self): - q = Queue('foo') - self.assertIs(worker_direct(q), q) - - -class test_gen_task_name(Case): - - def test_no_module(self): - app = Mock() - app.name == '__main__' - self.assertTrue(gen_task_name(app, 'foo', 'axsadaewe')) - - -class test_jsonify(Case): - - def test_simple(self): - self.assertTrue(jsonify(Queue('foo'))) - self.assertTrue(jsonify(['foo', 'bar', 'baz'])) - self.assertTrue(jsonify({'foo': 'bar'})) - self.assertTrue(jsonify(datetime.utcnow())) - self.assertTrue(jsonify(datetime.utcnow().replace(tzinfo=pytz.utc))) - self.assertTrue(jsonify(datetime.utcnow().replace(microsecond=0))) - self.assertTrue(jsonify(date(2012, 1, 1))) - self.assertTrue(jsonify(time(hour=1, minute=30))) - self.assertTrue(jsonify(time(hour=1, minute=30, microsecond=3))) - self.assertTrue(jsonify(timedelta(seconds=30))) - self.assertTrue(jsonify(10)) - self.assertTrue(jsonify(10.3)) - self.assertTrue(jsonify('hello')) - - with self.assertRaises(ValueError): - jsonify(object()) - - -class test_chunks(Case): - - def test_chunks(self): - - # n == 2 - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]], - ) - - # n == 3 - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) - self.assertListEqual( - list(x), - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]], - ) - - # n == 2 (exact) - x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], - ) - - -class test_utils(Case): - - def test_is_iterable(self): - for a in 'f', ['f'], ('f', ), {'f': 'f'}: - self.assertTrue(is_iterable(a)) - for b in object(), 1: - self.assertFalse(is_iterable(b)) - - def test_cached_property(self): - - def fun(obj): - return fun.value - - x = cached_property(fun) - self.assertIs(x.__get__(None), x) - self.assertIs(x.__set__(None, None), x) - self.assertIs(x.__delete__(None), x) - - @patch('warnings.warn') - def test_warn_deprecated(self, warn): - warn_deprecated('Foo') - self.assertTrue(warn.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py deleted file mode 100644 index e61b330..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py +++ /dev/null @@ -1,328 +0,0 @@ -from __future__ import absolute_import - -import errno -import select -import sys - -from time import time - -from celery.worker import autoreload -from celery.worker.autoreload import ( - WorkerComponent, - file_hash, - BaseMonitor, - StatMonitor, - KQueueMonitor, - InotifyMonitor, - default_implementation, - Autoreloader, -) - -from celery.tests.case import AppCase, Case, Mock, SkipTest, patch, mock_open - - -class test_WorkerComponent(AppCase): - - def test_create_threaded(self): - w = Mock() - w.use_eventloop = False - x = WorkerComponent(w) - x.instantiate = Mock() - r = x.create(w) - x.instantiate.assert_called_with(w.autoreloader_cls, w) - self.assertIs(r, w.autoreloader) - - @patch('select.kevent', create=True) - @patch('select.kqueue', create=True) - @patch('kombu.utils.eventio.kqueue') - def test_create_ev(self, kq, kqueue, kevent): - w = Mock() - w.use_eventloop = True - x = WorkerComponent(w) - x.instantiate = Mock() - r = x.create(w) - x.instantiate.assert_called_with(w.autoreloader_cls, w) - x.register_with_event_loop(w, w.hub) - self.assertIsNone(r) - w.hub.on_close.add.assert_called_with( - w.autoreloader.on_event_loop_close, - ) - - -class test_file_hash(Case): - - def test_hash(self): - with mock_open() as a: - a.write('the quick brown fox\n') - a.seek(0) - A = file_hash('foo') - with mock_open() as b: - b.write('the quick brown bar\n') - b.seek(0) - B = file_hash('bar') - self.assertNotEqual(A, B) - - -class test_BaseMonitor(Case): - - def test_start_stop_on_change(self): - x = BaseMonitor(['a', 'b']) - - with self.assertRaises(NotImplementedError): - x.start() - x.stop() - x.on_change([]) - x._on_change = Mock() - x.on_change('foo') - x._on_change.assert_called_with('foo') - - -class test_StatMonitor(Case): - - @patch('os.stat') - def test_start(self, stat): - - class st(object): - st_mtime = time() - stat.return_value = st() - x = StatMonitor(['a', 'b']) - - def on_is_set(): - if x.shutdown_event.is_set.call_count > 3: - return True - return False - x.shutdown_event = Mock() - x.shutdown_event.is_set.side_effect = on_is_set - - x.start() - x.shutdown_event = Mock() - stat.side_effect = OSError() - x.start() - - @patch('os.stat') - def test_mtime_stat_raises(self, stat): - stat.side_effect = ValueError() - x = StatMonitor(['a', 'b']) - x._mtime('a') - - -class test_KQueueMonitor(Case): - - @patch('select.kqueue', create=True) - @patch('os.close') - def test_stop(self, close, kqueue): - x = KQueueMonitor(['a', 'b']) - x.poller = Mock() - x.filemap['a'] = 10 - x.stop() - x.poller.close.assert_called_with() - close.assert_called_with(10) - - close.side_effect = OSError() - close.side_effect.errno = errno.EBADF - x.stop() - - def test_register_with_event_loop(self): - from kombu.utils import eventio - if eventio.kqueue is None: - raise SkipTest('version of kombu does not work with pypy') - x = KQueueMonitor(['a', 'b']) - hub = Mock(name='hub') - x.add_events = Mock(name='add_events()') - x.register_with_event_loop(hub) - x.add_events.assert_called_with(x._kq) - self.assertEqual( - x._kq.on_file_change, - x.handle_event, - ) - - def test_on_event_loop_close(self): - x = KQueueMonitor(['a', 'b']) - x.close = Mock() - x._kq = Mock(name='_kq') - x.on_event_loop_close(Mock(name='hub')) - x.close.assert_called_with(x._kq) - - def test_handle_event(self): - x = KQueueMonitor(['a', 'b']) - x.on_change = Mock() - eA = Mock() - eA.ident = 'a' - eB = Mock() - eB.ident = 'b' 
- x.fdmap = {'a': 'A', 'b': 'B'} - x.handle_event([eA, eB]) - x.on_change.assert_called_with(['A', 'B']) - - @patch('kombu.utils.eventio.kqueue', create=True) - @patch('kombu.utils.eventio.kevent', create=True) - @patch('os.open') - @patch('select.kqueue', create=True) - def test_start(self, _kq, osopen, kevent, kqueue): - from kombu.utils import eventio - prev_poll, eventio.poll = eventio.poll, kqueue - prev = {} - flags = ['KQ_FILTER_VNODE', 'KQ_EV_ADD', 'KQ_EV_ENABLE', - 'KQ_EV_CLEAR', 'KQ_NOTE_WRITE', 'KQ_NOTE_EXTEND'] - for i, flag in enumerate(flags): - prev[flag] = getattr(eventio, flag, None) - if not prev[flag]: - setattr(eventio, flag, i) - try: - kq = kqueue.return_value = Mock() - - class ev(object): - ident = 10 - filter = eventio.KQ_FILTER_VNODE - fflags = eventio.KQ_NOTE_WRITE - kq.control.return_value = [ev()] - x = KQueueMonitor(['a']) - osopen.return_value = 10 - calls = [0] - - def on_is_set(): - calls[0] += 1 - if calls[0] > 2: - return True - return False - x.shutdown_event = Mock() - x.shutdown_event.is_set.side_effect = on_is_set - x.start() - finally: - for flag in flags: - if prev[flag]: - setattr(eventio, flag, prev[flag]) - else: - delattr(eventio, flag) - eventio.poll = prev_poll - - -class test_InotifyMonitor(Case): - - @patch('celery.worker.autoreload.pyinotify') - def test_start(self, inotify): - x = InotifyMonitor(['a']) - inotify.IN_MODIFY = 1 - inotify.IN_ATTRIB = 2 - x.start() - - inotify.WatchManager.side_effect = ValueError() - with self.assertRaises(ValueError): - x.start() - x.stop() - - x._on_change = None - x.process_(Mock()) - x._on_change = Mock() - x.process_(Mock()) - self.assertTrue(x._on_change.called) - - -class test_default_implementation(Case): - - @patch('select.kqueue', create=True) - @patch('kombu.utils.eventio.kqueue', create=True) - def test_kqueue(self, kq, kqueue): - self.assertEqual(default_implementation(), 'kqueue') - - @patch('celery.worker.autoreload.pyinotify') - def test_inotify(self, pyinotify): - kq = getattr(select, 'kqueue', None) - try: - delattr(select, 'kqueue') - except AttributeError: - pass - platform, sys.platform = sys.platform, 'linux' - try: - self.assertEqual(default_implementation(), 'inotify') - ino, autoreload.pyinotify = autoreload.pyinotify, None - try: - self.assertEqual(default_implementation(), 'stat') - finally: - autoreload.pyinotify = ino - finally: - if kq: - select.kqueue = kq - sys.platform = platform - - -class test_Autoreloader(AppCase): - - def test_register_with_event_loop(self): - x = Autoreloader(Mock(), modules=[__name__]) - hub = Mock() - x._monitor = None - x.on_init = Mock() - - def se(*args, **kwargs): - x._monitor = Mock() - x.on_init.side_effect = se - - x.register_with_event_loop(hub) - x.on_init.assert_called_with() - x._monitor.register_with_event_loop.assert_called_with(hub) - - x._monitor.register_with_event_loop.reset_mock() - x.register_with_event_loop(hub) - x._monitor.register_with_event_loop.assert_called_with(hub) - - def test_on_event_loop_close(self): - x = Autoreloader(Mock(), modules=[__name__]) - hub = Mock() - x._monitor = Mock() - x.on_event_loop_close(hub) - x._monitor.on_event_loop_close.assert_called_with(hub) - x._monitor = None - x.on_event_loop_close(hub) - - @patch('celery.worker.autoreload.file_hash') - def test_start(self, fhash): - x = Autoreloader(Mock(), modules=[__name__]) - x.Monitor = Mock() - mon = x.Monitor.return_value = Mock() - mon.start.side_effect = OSError() - mon.start.side_effect.errno = errno.EINTR - x.body() - mon.start.side_effect.errno = 
errno.ENOENT - with self.assertRaises(OSError): - x.body() - mon.start.side_effect = None - x.body() - - @patch('celery.worker.autoreload.file_hash') - @patch('os.path.exists') - def test_maybe_modified(self, exists, fhash): - exists.return_value = True - fhash.return_value = 'abcd' - x = Autoreloader(Mock(), modules=[__name__]) - x._hashes = {} - x._hashes[__name__] = 'dcba' - self.assertTrue(x._maybe_modified(__name__)) - x._hashes[__name__] = 'abcd' - self.assertFalse(x._maybe_modified(__name__)) - exists.return_value = False - self.assertFalse(x._maybe_modified(__name__)) - - def test_on_change(self): - x = Autoreloader(Mock(), modules=[__name__]) - mm = x._maybe_modified = Mock(0) - mm.return_value = True - x._reload = Mock() - x.file_to_module[__name__] = __name__ - x.on_change([__name__]) - self.assertTrue(x._reload.called) - mm.return_value = False - x.on_change([__name__]) - - def test_reload(self): - x = Autoreloader(Mock(), modules=[__name__]) - x._reload([__name__]) - x.controller.reload.assert_called_with([__name__], reload=True) - - def test_stop(self): - x = Autoreloader(Mock(), modules=[__name__]) - x._monitor = None - x.stop() - x._monitor = Mock() - x.stop() - x._monitor.stop.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py deleted file mode 100644 index 45ea488..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py +++ /dev/null @@ -1,198 +0,0 @@ -from __future__ import absolute_import - -import sys - -from celery.concurrency.base import BasePool -from celery.five import monotonic -from celery.worker import state -from celery.worker import autoscale -from celery.tests.case import AppCase, Mock, patch, sleepdeprived - - -class Object(object): - pass - - -class MockPool(BasePool): - shrink_raises_exception = False - shrink_raises_ValueError = False - - def __init__(self, *args, **kwargs): - super(MockPool, self).__init__(*args, **kwargs) - self._pool = Object() - self._pool._processes = self.limit - - def grow(self, n=1): - self._pool._processes += n - - def shrink(self, n=1): - if self.shrink_raises_exception: - raise KeyError('foo') - if self.shrink_raises_ValueError: - raise ValueError('foo') - self._pool._processes -= n - - @property - def num_processes(self): - return self._pool._processes - - -class test_WorkerComponent(AppCase): - - def test_register_with_event_loop(self): - parent = Mock(name='parent') - parent.autoscale = True - parent.consumer.on_task_message = set() - w = autoscale.WorkerComponent(parent) - self.assertIsNone(parent.autoscaler) - self.assertTrue(w.enabled) - - hub = Mock(name='hub') - w.create(parent) - w.register_with_event_loop(parent, hub) - self.assertIn( - parent.autoscaler.maybe_scale, - parent.consumer.on_task_message, - ) - hub.call_repeatedly.assert_called_with( - parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, - ) - - parent.hub = hub - hub.on_init = [] - w.instantiate = Mock() - w.register_with_event_loop(parent, Mock(name='loop')) - self.assertTrue(parent.consumer.on_task_message) - - -class test_Autoscaler(AppCase): - - def setup(self): - self.pool = MockPool(3) - - def test_stop(self): - - class Scaler(autoscale.Autoscaler): - alive = True - joined = False - - def is_alive(self): - return self.alive - - def join(self, timeout=None): - self.joined = True - - worker = Mock(name='worker') - x = Scaler(self.pool, 10, 3, worker=worker) - 
x._is_stopped.set() - x.stop() - self.assertTrue(x.joined) - x.joined = False - x.alive = False - x.stop() - self.assertFalse(x.joined) - - @sleepdeprived(autoscale) - def test_body(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.body() - self.assertEqual(x.pool.num_processes, 3) - for i in range(20): - state.reserved_requests.add(i) - x.body() - x.body() - self.assertEqual(x.pool.num_processes, 10) - self.assertTrue(worker.consumer._update_prefetch_count.called) - state.reserved_requests.clear() - x.body() - self.assertEqual(x.pool.num_processes, 10) - x._last_action = monotonic() - 10000 - x.body() - self.assertEqual(x.pool.num_processes, 3) - self.assertTrue(worker.consumer._update_prefetch_count.called) - - def test_run(self): - - class Scaler(autoscale.Autoscaler): - scale_called = False - - def body(self): - self.scale_called = True - self._is_shutdown.set() - - worker = Mock(name='worker') - x = Scaler(self.pool, 10, 3, worker=worker) - x.run() - self.assertTrue(x._is_shutdown.isSet()) - self.assertTrue(x._is_stopped.isSet()) - self.assertTrue(x.scale_called) - - def test_shrink_raises_exception(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.scale_up(3) - x._last_action = monotonic() - 10000 - x.pool.shrink_raises_exception = True - x.scale_down(1) - - @patch('celery.worker.autoscale.debug') - def test_shrink_raises_ValueError(self, debug): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - x.scale_up(3) - x._last_action = monotonic() - 10000 - x.pool.shrink_raises_ValueError = True - x.scale_down(1) - self.assertTrue(debug.call_count) - - def test_update_and_force(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - self.assertEqual(x.processes, 3) - x.force_scale_up(5) - self.assertEqual(x.processes, 8) - x.update(5, None) - self.assertEqual(x.processes, 5) - x.force_scale_down(3) - self.assertEqual(x.processes, 2) - x.update(3, None) - self.assertEqual(x.processes, 3) - x.force_scale_down(1000) - self.assertEqual(x.min_concurrency, 0) - self.assertEqual(x.processes, 0) - x.force_scale_up(1000) - x.min_concurrency = 1 - x.force_scale_down(1) - - x.update(max=300, min=10) - x.update(max=300, min=2) - x.update(max=None, min=None) - - def test_info(self): - worker = Mock(name='worker') - x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) - info = x.info() - self.assertEqual(info['max'], 10) - self.assertEqual(info['min'], 3) - self.assertEqual(info['current'], 3) - - @patch('os._exit') - def test_thread_crash(self, _exit): - - class _Autoscaler(autoscale.Autoscaler): - - def body(self): - self._is_shutdown.set() - raise OSError('foo') - worker = Mock(name='worker') - x = _Autoscaler(self.pool, 10, 3, worker=worker) - - stderr = Mock() - p, sys.stderr = sys.stderr, stderr - try: - x.run() - finally: - sys.stderr = p - _exit.assert_called_with(1) - self.assertTrue(stderr.write.call_count) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py deleted file mode 100644 index 522d263..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py +++ /dev/null @@ -1,338 +0,0 @@ -from __future__ import absolute_import - -from celery import bootsteps - -from celery.tests.case import AppCase, Mock, patch - - -class test_StepFormatter(AppCase): - - def 
test_get_prefix(self): - f = bootsteps.StepFormatter() - s = Mock() - s.last = True - self.assertEqual(f._get_prefix(s), f.blueprint_prefix) - - s2 = Mock() - s2.last = False - s2.conditional = True - self.assertEqual(f._get_prefix(s2), f.conditional_prefix) - - s3 = Mock() - s3.last = s3.conditional = False - self.assertEqual(f._get_prefix(s3), '') - - def test_node(self): - f = bootsteps.StepFormatter() - f.draw_node = Mock() - step = Mock() - step.last = False - f.node(step, x=3) - f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) - - step.last = True - f.node(step, x=3) - f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) - - def test_edge(self): - f = bootsteps.StepFormatter() - f.draw_edge = Mock() - a, b = Mock(), Mock() - a.last = True - f.edge(a, b, x=6) - f.draw_edge.assert_called_with(a, b, f.edge_scheme, { - 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', - }) - - a.last = False - f.edge(a, b, x=6) - f.draw_edge.assert_called_with(a, b, f.edge_scheme, { - 'x': 6, - }) - - -class test_Step(AppCase): - - class Def(bootsteps.StartStopStep): - name = 'test_Step.Def' - - def setup(self): - self.steps = [] - - def test_blueprint_name(self, bp='test_blueprint_name'): - - class X(bootsteps.Step): - blueprint = bp - name = 'X' - self.assertEqual(X.name, 'X') - - class Y(bootsteps.Step): - name = '%s.Y' % bp - self.assertEqual(Y.name, '%s.Y' % bp) - - def test_init(self): - self.assertTrue(self.Def(self)) - - def test_create(self): - self.Def(self).create(self) - - def test_include_if(self): - x = self.Def(self) - x.enabled = True - self.assertTrue(x.include_if(self)) - - x.enabled = False - self.assertFalse(x.include_if(self)) - - def test_instantiate(self): - self.assertIsInstance(self.Def(self).instantiate(self.Def, self), - self.Def) - - def test_include_when_enabled(self): - x = self.Def(self) - x.create = Mock() - x.create.return_value = 'George' - self.assertTrue(x.include(self)) - - self.assertEqual(x.obj, 'George') - x.create.assert_called_with(self) - - def test_include_when_disabled(self): - x = self.Def(self) - x.enabled = False - x.create = Mock() - - self.assertFalse(x.include(self)) - self.assertFalse(x.create.call_count) - - def test_repr(self): - x = self.Def(self) - self.assertTrue(repr(x)) - - -class test_ConsumerStep(AppCase): - - def test_interface(self): - step = bootsteps.ConsumerStep(self) - with self.assertRaises(NotImplementedError): - step.get_consumers(self) - - def test_start_stop_shutdown(self): - consumer = Mock() - self.connection = Mock() - - class Step(bootsteps.ConsumerStep): - - def get_consumers(self, c): - return [consumer] - - step = Step(self) - self.assertEqual(step.get_consumers(self), [consumer]) - - step.start(self) - consumer.consume.assert_called_with() - step.stop(self) - consumer.cancel.assert_called_with() - - step.shutdown(self) - consumer.channel.close.assert_called_with() - - def test_start_no_consumers(self): - self.connection = Mock() - - class Step(bootsteps.ConsumerStep): - - def get_consumers(self, c): - return () - - step = Step(self) - step.start(self) - - -class test_StartStopStep(AppCase): - - class Def(bootsteps.StartStopStep): - name = 'test_StartStopStep.Def' - - def setup(self): - self.steps = [] - - def test_start__stop(self): - x = self.Def(self) - x.create = Mock() - - # include creates the underlying object and sets - # its x.obj attribute to it, as well as appending - # it to the parent.steps list. 
- x.include(self) - self.assertTrue(self.steps) - self.assertIs(self.steps[0], x) - - x.start(self) - x.obj.start.assert_called_with() - - x.stop(self) - x.obj.stop.assert_called_with() - - x.obj = None - self.assertIsNone(x.start(self)) - - def test_include_when_disabled(self): - x = self.Def(self) - x.enabled = False - x.include(self) - self.assertFalse(self.steps) - - def test_terminate(self): - x = self.Def(self) - x.create = Mock() - - x.include(self) - delattr(x.obj, 'terminate') - x.terminate(self) - x.obj.stop.assert_called_with() - - -class test_Blueprint(AppCase): - - class Blueprint(bootsteps.Blueprint): - name = 'test_Blueprint' - - def test_steps_added_to_unclaimed(self): - - class tnA(bootsteps.Step): - name = 'test_Blueprint.A' - - class tnB(bootsteps.Step): - name = 'test_Blueprint.B' - - class xxA(bootsteps.Step): - name = 'xx.A' - - class Blueprint(self.Blueprint): - default_steps = [tnA, tnB] - blueprint = Blueprint(app=self.app) - - self.assertIn(tnA, blueprint._all_steps()) - self.assertIn(tnB, blueprint._all_steps()) - self.assertNotIn(xxA, blueprint._all_steps()) - - def test_init(self): - blueprint = self.Blueprint(app=self.app) - self.assertIs(blueprint.app, self.app) - self.assertEqual(blueprint.name, 'test_Blueprint') - - def test_close__on_close_is_None(self): - blueprint = self.Blueprint(app=self.app) - blueprint.on_close = None - blueprint.send_all = Mock() - blueprint.close(1) - blueprint.send_all.assert_called_with( - 1, 'close', 'closing', reverse=False, - ) - - def test_send_all_with_None_steps(self): - parent = Mock() - blueprint = self.Blueprint(app=self.app) - parent.steps = [None, None, None] - blueprint.send_all(parent, 'close', 'Closing', reverse=False) - - def test_join_raises_IGNORE_ERRORS(self): - prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError, ) - try: - blueprint = self.Blueprint(app=self.app) - blueprint.shutdown_complete = Mock() - blueprint.shutdown_complete.wait.side_effect = KeyError('luke') - blueprint.join(timeout=10) - blueprint.shutdown_complete.wait.assert_called_with(timeout=10) - finally: - bootsteps.IGNORE_ERRORS = prev - - def test_connect_with(self): - - class b1s1(bootsteps.Step): - pass - - class b1s2(bootsteps.Step): - last = True - - class b2s1(bootsteps.Step): - pass - - class b2s2(bootsteps.Step): - last = True - - b1 = self.Blueprint([b1s1, b1s2], app=self.app) - b2 = self.Blueprint([b2s1, b2s2], app=self.app) - b1.apply(Mock()) - b2.apply(Mock()) - b1.connect_with(b2) - - self.assertIn(b1s1, b1.graph) - self.assertIn(b2s1, b1.graph) - self.assertIn(b2s2, b1.graph) - - self.assertTrue(repr(b1s1)) - self.assertTrue(str(b1s1)) - - def test_topsort_raises_KeyError(self): - - class Step(bootsteps.Step): - requires = ('xyxxx.fsdasewe.Unknown', ) - - b = self.Blueprint([Step], app=self.app) - b.steps = b.claim_steps() - with self.assertRaises(ImportError): - b._finalize_steps(b.steps) - Step.requires = () - - b.steps = b.claim_steps() - b._finalize_steps(b.steps) - - with patch('celery.bootsteps.DependencyGraph') as Dep: - g = Dep.return_value = Mock() - g.topsort.side_effect = KeyError('foo') - with self.assertRaises(KeyError): - b._finalize_steps(b.steps) - - def test_apply(self): - - class MyBlueprint(bootsteps.Blueprint): - name = 'test_apply' - - def modules(self): - return ['A', 'B'] - - class B(bootsteps.Step): - name = 'test_apply.B' - - class C(bootsteps.Step): - name = 'test_apply.C' - requires = [B] - - class A(bootsteps.Step): - name = 'test_apply.A' - requires = [C] - - class D(bootsteps.Step): - 
name = 'test_apply.D' - last = True - - x = MyBlueprint([A, D], app=self.app) - x.apply(self) - - self.assertIsInstance(x.order[0], B) - self.assertIsInstance(x.order[1], C) - self.assertIsInstance(x.order[2], A) - self.assertIsInstance(x.order[3], D) - self.assertIn(A, x.types) - self.assertIs(x[A.name], x.order[2]) - - def test_find_last_but_no_steps(self): - - class MyBlueprint(bootsteps.Blueprint): - name = 'qwejwioqjewoqiej' - - x = MyBlueprint(app=self.app) - x.apply(self) - self.assertIsNone(x._find_last()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py deleted file mode 100644 index b39865d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import - -# some of these are tested in test_worker, so I've only written tests -# here to complete coverage. Should move everyting to this module at some -# point [-ask] - -from celery.worker.components import ( - Queues, - Pool, -) - -from celery.tests.case import AppCase, Mock - - -class test_Queues(AppCase): - - def test_create_when_eventloop(self): - w = Mock() - w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True - q = Queues(w) - q.create(w) - self.assertIs(w.process_task, w._process_task_sem) - - -class test_Pool(AppCase): - - def test_close_terminate(self): - w = Mock() - comp = Pool(w) - pool = w.pool = Mock() - comp.close(w) - pool.close.assert_called_with() - comp.terminate(w) - pool.terminate.assert_called_with() - - w.pool = None - comp.close(w) - comp.terminate(w) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py deleted file mode 100644 index ea4f6bb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py +++ /dev/null @@ -1,512 +0,0 @@ -from __future__ import absolute_import - -import errno -import socket - -from billiard.exceptions import RestartFreqExceeded - -from celery.datastructures import LimitedSet -from celery.worker import state as worker_state -from celery.worker.consumer import ( - Consumer, - Heart, - Tasks, - Agent, - Mingle, - Gossip, - dump_body, - CLOSE, -) - -from celery.tests.case import AppCase, ContextMock, Mock, SkipTest, call, patch - - -class test_Consumer(AppCase): - - def get_consumer(self, no_hub=False, **kwargs): - consumer = Consumer( - on_task_request=Mock(), - init_callback=Mock(), - pool=Mock(), - app=self.app, - timer=Mock(), - controller=Mock(), - hub=None if no_hub else Mock(), - **kwargs - ) - consumer.blueprint = Mock() - consumer._restart_state = Mock() - consumer.connection = _amqp_connection() - consumer.connection_errors = (socket.error, OSError, ) - return consumer - - def test_taskbuckets_defaultdict(self): - c = self.get_consumer() - self.assertIsNone(c.task_buckets['fooxasdwx.wewe']) - - def test_dump_body_buffer(self): - msg = Mock() - msg.body = 'str' - try: - buf = buffer(msg.body) - except NameError: - raise SkipTest('buffer type not available') - self.assertTrue(dump_body(msg, buf)) - - def test_sets_heartbeat(self): - c = self.get_consumer(amqheartbeat=10) - self.assertEqual(c.amqheartbeat, 10) - self.app.conf.BROKER_HEARTBEAT = 20 - c = self.get_consumer(amqheartbeat=None) - self.assertEqual(c.amqheartbeat, 20) - - def test_gevent_bug_disables_connection_timeout(self): - with 
patch('celery.worker.consumer._detect_environment') as de: - de.return_value = 'gevent' - self.app.conf.BROKER_CONNECTION_TIMEOUT = 33.33 - self.get_consumer() - self.assertIsNone(self.app.conf.BROKER_CONNECTION_TIMEOUT) - - def test_limit_task(self): - c = self.get_consumer() - - with patch('celery.worker.consumer.task_reserved') as reserved: - bucket = Mock() - request = Mock() - bucket.can_consume.return_value = True - - c._limit_task(request, bucket, 3) - bucket.can_consume.assert_called_with(3) - reserved.assert_called_with(request) - c.on_task_request.assert_called_with(request) - - with patch('celery.worker.consumer.task_reserved') as reserved: - bucket.can_consume.return_value = False - bucket.expected_time.return_value = 3.33 - c._limit_task(request, bucket, 4) - bucket.can_consume.assert_called_with(4) - c.timer.call_after.assert_called_with( - 3.33, c._limit_task, (request, bucket, 4), - ) - bucket.expected_time.assert_called_with(4) - self.assertFalse(reserved.called) - - def test_start_blueprint_raises_EMFILE(self): - c = self.get_consumer() - exc = c.blueprint.start.side_effect = OSError() - exc.errno = errno.EMFILE - - with self.assertRaises(OSError): - c.start() - - def test_max_restarts_exceeded(self): - c = self.get_consumer() - - def se(*args, **kwargs): - c.blueprint.state = CLOSE - raise RestartFreqExceeded() - c._restart_state.step.side_effect = se - c.blueprint.start.side_effect = socket.error() - - with patch('celery.worker.consumer.sleep') as sleep: - c.start() - sleep.assert_called_with(1) - - def _closer(self, c): - def se(*args, **kwargs): - c.blueprint.state = CLOSE - return se - - def test_collects_at_restart(self): - c = self.get_consumer() - c.connection.collect.side_effect = MemoryError() - c.blueprint.start.side_effect = socket.error() - c.blueprint.restart.side_effect = self._closer(c) - c.start() - c.connection.collect.assert_called_with() - - def test_register_with_event_loop(self): - c = self.get_consumer() - c.register_with_event_loop(Mock(name='loop')) - - def test_on_close_clears_semaphore_timer_and_reqs(self): - with patch('celery.worker.consumer.reserved_requests') as reserved: - c = self.get_consumer() - c.on_close() - c.controller.semaphore.clear.assert_called_with() - c.timer.clear.assert_called_with() - reserved.clear.assert_called_with() - c.pool.flush.assert_called_with() - - c.controller = None - c.timer = None - c.pool = None - c.on_close() - - def test_connect_error_handler(self): - self.app.connection = _amqp_connection() - conn = self.app.connection.return_value - c = self.get_consumer() - self.assertTrue(c.connect()) - self.assertTrue(conn.ensure_connection.called) - errback = conn.ensure_connection.call_args[0][0] - conn.alt = [(1, 2, 3)] - errback(Mock(), 0) - - -class test_Heart(AppCase): - - def test_start(self): - c = Mock() - c.timer = Mock() - c.event_dispatcher = Mock() - - with patch('celery.worker.heartbeat.Heart') as hcls: - h = Heart(c) - self.assertTrue(h.enabled) - self.assertEqual(h.heartbeat_interval, None) - self.assertIsNone(c.heart) - - h.start(c) - self.assertTrue(c.heart) - hcls.assert_called_with(c.timer, c.event_dispatcher, - h.heartbeat_interval) - c.heart.start.assert_called_with() - - def test_start_heartbeat_interval(self): - c = Mock() - c.timer = Mock() - c.event_dispatcher = Mock() - - with patch('celery.worker.heartbeat.Heart') as hcls: - h = Heart(c, False, 20) - self.assertTrue(h.enabled) - self.assertEqual(h.heartbeat_interval, 20) - self.assertIsNone(c.heart) - - h.start(c) - self.assertTrue(c.heart) 
- hcls.assert_called_with(c.timer, c.event_dispatcher, - h.heartbeat_interval) - c.heart.start.assert_called_with() - - -class test_Tasks(AppCase): - - def test_stop(self): - c = Mock() - tasks = Tasks(c) - self.assertIsNone(c.task_consumer) - self.assertIsNone(c.qos) - - c.task_consumer = Mock() - tasks.stop(c) - - def test_stop_already_stopped(self): - c = Mock() - tasks = Tasks(c) - tasks.stop(c) - - -class test_Agent(AppCase): - - def test_start(self): - c = Mock() - agent = Agent(c) - agent.instantiate = Mock() - agent.agent_cls = 'foo:Agent' - self.assertIsNotNone(agent.create(c)) - agent.instantiate.assert_called_with(agent.agent_cls, c.connection) - - -class test_Mingle(AppCase): - - def test_start_no_replies(self): - c = Mock() - c.app.connection = _amqp_connection() - mingle = Mingle(c) - I = c.app.control.inspect.return_value = Mock() - I.hello.return_value = {} - mingle.start(c) - - def test_start(self): - try: - c = Mock() - c.app.connection = _amqp_connection() - mingle = Mingle(c) - self.assertTrue(mingle.enabled) - - Aig = LimitedSet() - Big = LimitedSet() - Aig.add('Aig-1') - Aig.add('Aig-2') - Big.add('Big-1') - - I = c.app.control.inspect.return_value = Mock() - I.hello.return_value = { - 'A@example.com': { - 'clock': 312, - 'revoked': Aig._data, - }, - 'B@example.com': { - 'clock': 29, - 'revoked': Big._data, - }, - 'C@example.com': { - 'error': 'unknown method', - }, - } - - mingle.start(c) - I.hello.assert_called_with(c.hostname, worker_state.revoked._data) - c.app.clock.adjust.assert_has_calls([ - call(312), call(29), - ], any_order=True) - self.assertIn('Aig-1', worker_state.revoked) - self.assertIn('Aig-2', worker_state.revoked) - self.assertIn('Big-1', worker_state.revoked) - finally: - worker_state.revoked.clear() - - -def _amqp_connection(): - connection = ContextMock() - connection.return_value = ContextMock() - connection.return_value.transport.driver_type = 'amqp' - return connection - - -class test_Gossip(AppCase): - - def test_init(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - self.assertTrue(g.enabled) - self.assertIs(c.gossip, g) - - def test_callbacks(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - on_node_join = Mock(name='on_node_join') - on_node_join2 = Mock(name='on_node_join2') - on_node_leave = Mock(name='on_node_leave') - on_node_lost = Mock(name='on.node_lost') - g.on.node_join.add(on_node_join) - g.on.node_join.add(on_node_join2) - g.on.node_leave.add(on_node_leave) - g.on.node_lost.add(on_node_lost) - - worker = Mock(name='worker') - g.on_node_join(worker) - on_node_join.assert_called_with(worker) - on_node_join2.assert_called_with(worker) - g.on_node_leave(worker) - on_node_leave.assert_called_with(worker) - g.on_node_lost(worker) - on_node_lost.assert_called_with(worker) - - def test_election(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - g.election('id', 'topic', 'action') - self.assertListEqual(g.consensus_replies['id'], []) - g.dispatcher.send.assert_called_with( - 'worker-elect', id='id', topic='topic', cver=1, action='action', - ) - - def test_call_task(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - - with patch('celery.worker.consumer.signature') as signature: - sig = signature.return_value = Mock() - task = Mock() - g.call_task(task) - signature.assert_called_with(task, app=c.app) - sig.apply_async.assert_called_with() - - sig.apply_async.side_effect = 
MemoryError() - with patch('celery.worker.consumer.error') as error: - g.call_task(task) - self.assertTrue(error.called) - - def Event(self, id='id', clock=312, - hostname='foo@example.com', pid=4312, - topic='topic', action='action', cver=1): - return { - 'id': id, - 'clock': clock, - 'hostname': hostname, - 'pid': pid, - 'topic': topic, - 'action': action, - 'cver': cver, - } - - def test_on_elect(self): - c = self.Consumer() - c.app.connection = _amqp_connection() - g = Gossip(c) - g.start(c) - - event = self.Event('id1') - g.on_elect(event) - in_heap = g.consensus_requests['id1'] - self.assertTrue(in_heap) - g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') - - event.pop('clock') - with patch('celery.worker.consumer.error') as error: - g.on_elect(event) - self.assertTrue(error.called) - - def Consumer(self, hostname='foo@x.com', pid=4312): - c = Mock() - c.app.connection = _amqp_connection() - c.hostname = hostname - c.pid = pid - return c - - def setup_election(self, g, c): - g.start(c) - g.clock = self.app.clock - self.assertNotIn('idx', g.consensus_replies) - self.assertIsNone(g.on_elect_ack({'id': 'idx'})) - - g.state.alive_workers.return_value = [ - 'foo@x.com', 'bar@x.com', 'baz@x.com', - ] - g.consensus_replies['id1'] = [] - g.consensus_requests['id1'] = [] - e1 = self.Event('id1', 1, 'foo@x.com') - e2 = self.Event('id1', 2, 'bar@x.com') - e3 = self.Event('id1', 3, 'baz@x.com') - g.on_elect(e1) - g.on_elect(e2) - g.on_elect(e3) - self.assertEqual(len(g.consensus_requests['id1']), 3) - - with patch('celery.worker.consumer.info'): - g.on_elect_ack(e1) - self.assertEqual(len(g.consensus_replies['id1']), 1) - g.on_elect_ack(e2) - self.assertEqual(len(g.consensus_replies['id1']), 2) - g.on_elect_ack(e3) - with self.assertRaises(KeyError): - g.consensus_replies['id1'] - - def test_on_elect_ack_win(self): - c = self.Consumer(hostname='foo@x.com') # I will win - g = Gossip(c) - handler = g.election_handlers['topic'] = Mock() - self.setup_election(g, c) - handler.assert_called_with('action') - - def test_on_elect_ack_lose(self): - c = self.Consumer(hostname='bar@x.com') # I will lose - c.app.connection = _amqp_connection() - g = Gossip(c) - handler = g.election_handlers['topic'] = Mock() - self.setup_election(g, c) - self.assertFalse(handler.called) - - def test_on_elect_ack_win_but_no_action(self): - c = self.Consumer(hostname='foo@x.com') # I will win - g = Gossip(c) - g.election_handlers = {} - with patch('celery.worker.consumer.error') as error: - self.setup_election(g, c) - self.assertTrue(error.called) - - def test_on_node_join(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.debug') as debug: - g.on_node_join(c) - debug.assert_called_with('%s joined the party', 'foo@x.com') - - def test_on_node_leave(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.debug') as debug: - g.on_node_leave(c) - debug.assert_called_with('%s left', 'foo@x.com') - - def test_on_node_lost(self): - c = self.Consumer() - g = Gossip(c) - with patch('celery.worker.consumer.info') as info: - g.on_node_lost(c) - info.assert_called_with('missed heartbeat from %s', 'foo@x.com') - - def test_register_timer(self): - c = self.Consumer() - g = Gossip(c) - g.register_timer() - c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) - tref = g._tref - g.register_timer() - tref.cancel.assert_called_with() - - def test_periodic(self): - c = self.Consumer() - g = Gossip(c) - g.on_node_lost = Mock() - state = g.state = Mock() - 
worker = Mock() - state.workers = {'foo': worker} - worker.alive = True - worker.hostname = 'foo' - g.periodic() - - worker.alive = False - g.periodic() - g.on_node_lost.assert_called_with(worker) - with self.assertRaises(KeyError): - state.workers['foo'] - - def test_on_message(self): - c = self.Consumer() - g = Gossip(c) - self.assertTrue(g.enabled) - prepare = Mock() - prepare.return_value = 'worker-online', {} - c.app.events.State.assert_called_with( - on_node_join=g.on_node_join, - on_node_leave=g.on_node_leave, - max_tasks_in_memory=1, - ) - g.update_state = Mock() - worker = Mock() - g.on_node_join = Mock() - g.on_node_leave = Mock() - g.update_state.return_value = worker, 1 - message = Mock() - message.delivery_info = {'routing_key': 'worker-online'} - message.headers = {'hostname': 'other'} - - handler = g.event_handlers['worker-online'] = Mock() - g.on_message(prepare, message) - handler.assert_called_with(message.payload) - g.event_handlers = {} - - g.on_message(prepare, message) - - message.delivery_info = {'routing_key': 'worker-offline'} - prepare.return_value = 'worker-offline', {} - g.on_message(prepare, message) - - message.delivery_info = {'routing_key': 'worker-baz'} - prepare.return_value = 'worker-baz', {} - g.update_state.return_value = worker, 0 - g.on_message(prepare, message) - - message.headers = {'hostname': g.hostname} - g.on_message(prepare, message) - g.clock.forward.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py deleted file mode 100644 index 86bf550..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py +++ /dev/null @@ -1,601 +0,0 @@ -from __future__ import absolute_import - -import sys -import socket - -from collections import defaultdict -from datetime import datetime, timedelta - -from kombu import pidbox - -from celery.datastructures import AttributeDict -from celery.five import Queue as FastQueue -from celery.utils import uuid -from celery.utils.timer2 import Timer -from celery.worker import WorkController as _WC -from celery.worker import consumer -from celery.worker import control -from celery.worker import state as worker_state -from celery.worker.job import Request -from celery.worker.state import revoked -from celery.worker.control import Panel -from celery.worker.pidbox import Pidbox, gPidbox - -from celery.tests.case import AppCase, Mock, call, patch - -hostname = socket.gethostname() - - -class WorkController(object): - autoscaler = None - - def stats(self): - return {'total': worker_state.total_count} - - -class Consumer(consumer.Consumer): - - def __init__(self, app): - self.app = app - self.buffer = FastQueue() - self.handle_task = self.buffer.put - self.timer = Timer() - self.event_dispatcher = Mock() - self.controller = WorkController() - self.task_consumer = Mock() - self.prefetch_multiplier = 1 - self.initial_prefetch_count = 1 - - from celery.concurrency.base import BasePool - self.pool = BasePool(10) - self.task_buckets = defaultdict(lambda: None) - - -class test_Pidbox(AppCase): - - def test_shutdown(self): - with patch('celery.worker.pidbox.ignore_errors') as eig: - parent = Mock() - pbox = Pidbox(parent) - pbox._close_channel = Mock() - self.assertIs(pbox.c, parent) - pconsumer = pbox.consumer = Mock() - cancel = pconsumer.cancel - pbox.shutdown(parent) - eig.assert_called_with(parent, cancel) - pbox._close_channel.assert_called_with(parent) - - -class 
test_Pidbox_green(AppCase): - - def test_stop(self): - parent = Mock() - g = gPidbox(parent) - stopped = g._node_stopped = Mock() - shutdown = g._node_shutdown = Mock() - close_chan = g._close_channel = Mock() - - g.stop(parent) - shutdown.set.assert_called_with() - stopped.wait.assert_called_with() - close_chan.assert_called_with(parent) - self.assertIsNone(g._node_stopped) - self.assertIsNone(g._node_shutdown) - - close_chan.reset() - g.stop(parent) - close_chan.assert_called_with(parent) - - def test_resets(self): - parent = Mock() - g = gPidbox(parent) - g._resets = 100 - g.reset() - self.assertEqual(g._resets, 101) - - def test_loop(self): - parent = Mock() - conn = parent.connect.return_value = self.app.connection() - drain = conn.drain_events = Mock() - g = gPidbox(parent) - parent.connection = Mock() - do_reset = g._do_reset = Mock() - - call_count = [0] - - def se(*args, **kwargs): - if call_count[0] > 2: - g._node_shutdown.set() - g.reset() - call_count[0] += 1 - drain.side_effect = se - g.loop(parent) - - self.assertEqual(do_reset.call_count, 4) - - -class test_ControlPanel(AppCase): - - def setup(self): - self.panel = self.create_panel(consumer=Consumer(self.app)) - - @self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False) - def mytask(): - pass - self.mytask = mytask - - def create_state(self, **kwargs): - kwargs.setdefault('app', self.app) - kwargs.setdefault('hostname', hostname) - return AttributeDict(kwargs) - - def create_panel(self, **kwargs): - return self.app.control.mailbox.Node(hostname=hostname, - state=self.create_state(**kwargs), - handlers=Panel.data) - - def test_enable_events(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - evd = consumer.event_dispatcher - evd.groups = set() - panel.handle('enable_events') - self.assertFalse(evd.groups) - evd.groups = set(['worker']) - panel.handle('enable_events') - self.assertIn('task', evd.groups) - evd.groups = set(['task']) - self.assertIn('already enabled', panel.handle('enable_events')['ok']) - - def test_disable_events(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - evd = consumer.event_dispatcher - evd.enabled = True - evd.groups = set(['task']) - panel.handle('disable_events') - self.assertNotIn('task', evd.groups) - self.assertIn('already disabled', panel.handle('disable_events')['ok']) - - def test_clock(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - panel.state.app.clock.value = 313 - x = panel.handle('clock') - self.assertEqual(x['clock'], 313) - - def test_hello(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - panel.state.app.clock.value = 313 - worker_state.revoked.add('revoked1') - try: - x = panel.handle('hello', {'from_node': 'george@vandelay.com'}) - self.assertIn('revoked1', x['revoked']) - self.assertEqual(x['clock'], 314) # incremented - finally: - worker_state.revoked.discard('revoked1') - - def test_conf(self): - return - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - self.app.conf.SOME_KEY6 = 'hello world' - x = panel.handle('dump_conf') - self.assertIn('SOME_KEY6', x) - - def test_election(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - consumer.gossip = Mock() - panel.handle( - 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, - ) - consumer.gossip.election.assert_called_with('id', 'topic', 'action') - - def test_heartbeat(self): - consumer = 
Consumer(self.app) - panel = self.create_panel(consumer=consumer) - consumer.event_dispatcher.enabled = True - panel.handle('heartbeat') - self.assertIn(('worker-heartbeat', ), - consumer.event_dispatcher.send.call_args) - - def test_time_limit(self): - panel = self.create_panel(consumer=Mock()) - r = panel.handle('time_limit', arguments=dict( - task_name=self.mytask.name, hard=30, soft=10)) - self.assertEqual( - (self.mytask.time_limit, self.mytask.soft_time_limit), - (30, 10), - ) - self.assertIn('ok', r) - r = panel.handle('time_limit', arguments=dict( - task_name=self.mytask.name, hard=None, soft=None)) - self.assertEqual( - (self.mytask.time_limit, self.mytask.soft_time_limit), - (None, None), - ) - self.assertIn('ok', r) - - r = panel.handle('time_limit', arguments=dict( - task_name='248e8afya9s8dh921eh928', hard=30)) - self.assertIn('error', r) - - def test_active_queues(self): - import kombu - - x = kombu.Consumer(self.app.connection(), - [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), - kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], - auto_declare=False) - consumer = Mock() - consumer.task_consumer = x - panel = self.create_panel(consumer=consumer) - r = panel.handle('active_queues') - self.assertListEqual(list(sorted(q['name'] for q in r)), - ['bar', 'foo']) - - def test_dump_tasks(self): - info = '\n'.join(self.panel.handle('dump_tasks')) - self.assertIn('mytask', info) - self.assertIn('rate_limit=200', info) - - def test_stats(self): - prev_count, worker_state.total_count = worker_state.total_count, 100 - try: - self.assertDictContainsSubset({'total': 100}, - self.panel.handle('stats')) - finally: - worker_state.total_count = prev_count - - def test_report(self): - self.panel.handle('report') - - def test_active(self): - r = Request({ - 'task': self.mytask.name, - 'id': 'do re mi', - 'args': (), - 'kwargs': {}, - }, app=self.app) - worker_state.active_requests.add(r) - try: - self.assertTrue(self.panel.handle('dump_active')) - finally: - worker_state.active_requests.discard(r) - - def test_pool_grow(self): - - class MockPool(object): - - def __init__(self, size=1): - self.size = size - - def grow(self, n=1): - self.size += n - - def shrink(self, n=1): - self.size -= n - - @property - def num_processes(self): - return self.size - - consumer = Consumer(self.app) - consumer.prefetch_multiplier = 8 - consumer.qos = Mock(name='qos') - consumer.pool = MockPool(1) - panel = self.create_panel(consumer=consumer) - - panel.handle('pool_grow') - self.assertEqual(consumer.pool.size, 2) - consumer.qos.increment_eventually.assert_called_with(8) - self.assertEqual(consumer.initial_prefetch_count, 16) - panel.handle('pool_shrink') - self.assertEqual(consumer.pool.size, 1) - consumer.qos.decrement_eventually.assert_called_with(8) - self.assertEqual(consumer.initial_prefetch_count, 8) - - panel.state.consumer = Mock() - panel.state.consumer.controller = Mock() - sc = panel.state.consumer.controller.autoscaler = Mock() - panel.handle('pool_grow') - self.assertTrue(sc.force_scale_up.called) - panel.handle('pool_shrink') - self.assertTrue(sc.force_scale_down.called) - - def test_add__cancel_consumer(self): - - class MockConsumer(object): - queues = [] - canceled = [] - consuming = False - - def add_queue(self, queue): - self.queues.append(queue.name) - - def consume(self): - self.consuming = True - - def cancel_by_queue(self, queue): - self.canceled.append(queue) - - def consuming_from(self, queue): - return queue in self.queues - - consumer = Consumer(self.app) - consumer.task_consumer = 
MockConsumer() - panel = self.create_panel(consumer=consumer) - - panel.handle('add_consumer', {'queue': 'MyQueue'}) - self.assertIn('MyQueue', consumer.task_consumer.queues) - self.assertTrue(consumer.task_consumer.consuming) - panel.handle('add_consumer', {'queue': 'MyQueue'}) - panel.handle('cancel_consumer', {'queue': 'MyQueue'}) - self.assertIn('MyQueue', consumer.task_consumer.canceled) - - def test_revoked(self): - worker_state.revoked.clear() - worker_state.revoked.add('a1') - worker_state.revoked.add('a2') - - try: - self.assertEqual(sorted(self.panel.handle('dump_revoked')), - ['a1', 'a2']) - finally: - worker_state.revoked.clear() - - def test_dump_schedule(self): - consumer = Consumer(self.app) - panel = self.create_panel(consumer=consumer) - self.assertFalse(panel.handle('dump_schedule')) - r = Request({ - 'task': self.mytask.name, - 'id': 'CAFEBABE', - 'args': (), - 'kwargs': {}, - }, app=self.app) - consumer.timer.schedule.enter_at( - consumer.timer.Entry(lambda x: x, (r, )), - datetime.now() + timedelta(seconds=10)) - consumer.timer.schedule.enter_at( - consumer.timer.Entry(lambda x: x, (object(), )), - datetime.now() + timedelta(seconds=10)) - self.assertTrue(panel.handle('dump_schedule')) - - def test_dump_reserved(self): - consumer = Consumer(self.app) - worker_state.reserved_requests.add(Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': (2, 2), - 'kwargs': {}, - }, app=self.app)) - try: - panel = self.create_panel(consumer=consumer) - response = panel.handle('dump_reserved', {'safe': True}) - self.assertDictContainsSubset( - {'name': self.mytask.name, - 'args': (2, 2), - 'kwargs': {}, - 'hostname': socket.gethostname()}, - response[0], - ) - worker_state.reserved_requests.clear() - self.assertFalse(panel.handle('dump_reserved')) - finally: - worker_state.reserved_requests.clear() - - def test_rate_limit_invalid_rate_limit_string(self): - e = self.panel.handle('rate_limit', arguments=dict( - task_name='tasks.add', rate_limit='x1240301#%!')) - self.assertIn('Invalid rate limit string', e.get('error')) - - def test_rate_limit(self): - - class xConsumer(object): - reset = False - - def reset_rate_limits(self): - self.reset = True - - consumer = xConsumer() - panel = self.create_panel(app=self.app, consumer=consumer) - - task = self.app.tasks[self.mytask.name] - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit='100/m')) - self.assertEqual(task.rate_limit, '100/m') - self.assertTrue(consumer.reset) - consumer.reset = False - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit=0)) - self.assertEqual(task.rate_limit, 0) - self.assertTrue(consumer.reset) - - def test_rate_limit_nonexistant_task(self): - self.panel.handle('rate_limit', arguments={ - 'task_name': 'xxxx.does.not.exist', - 'rate_limit': '1000/s'}) - - def test_unexposed_command(self): - with self.assertRaises(KeyError): - self.panel.handle('foo', arguments={}) - - def test_revoke_with_name(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 'arguments': {'task_id': tid, - 'task_name': self.mytask.name}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - def test_revoke_with_name_not_in_registry(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 'arguments': {'task_id': tid, - 'task_name': 'xxxxxxxxx33333333388888'}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - def test_revoke(self): - tid = uuid() - m = {'method': 'revoke', - 'destination': hostname, - 
'arguments': {'task_id': tid}} - self.panel.handle_message(m, None) - self.assertIn(tid, revoked) - - m = {'method': 'revoke', - 'destination': 'does.not.exist', - 'arguments': {'task_id': tid + 'xxx'}} - self.panel.handle_message(m, None) - self.assertNotIn(tid + 'xxx', revoked) - - def test_revoke_terminate(self): - request = Mock() - request.id = tid = uuid() - worker_state.reserved_requests.add(request) - try: - r = control.revoke(Mock(), tid, terminate=True) - self.assertIn(tid, revoked) - self.assertTrue(request.terminate.call_count) - self.assertIn('terminate:', r['ok']) - # unknown task id only revokes - r = control.revoke(Mock(), uuid(), terminate=True) - self.assertIn('tasks unknown', r['ok']) - finally: - worker_state.reserved_requests.discard(request) - - def test_autoscale(self): - self.panel.state.consumer = Mock() - self.panel.state.consumer.controller = Mock() - sc = self.panel.state.consumer.controller.autoscaler = Mock() - sc.update.return_value = 10, 2 - m = {'method': 'autoscale', - 'destination': hostname, - 'arguments': {'max': '10', 'min': '2'}} - r = self.panel.handle_message(m, None) - self.assertIn('ok', r) - - self.panel.state.consumer.controller.autoscaler = None - r = self.panel.handle_message(m, None) - self.assertIn('error', r) - - def test_ping(self): - m = {'method': 'ping', - 'destination': hostname} - r = self.panel.handle_message(m, None) - self.assertEqual(r, {'ok': 'pong'}) - - def test_shutdown(self): - m = {'method': 'shutdown', - 'destination': hostname} - with self.assertRaises(SystemExit): - self.panel.handle_message(m, None) - - def test_panel_reply(self): - - replies = [] - - class _Node(pidbox.Node): - - def reply(self, data, exchange, routing_key, **kwargs): - replies.append(data) - - panel = _Node(hostname=hostname, - state=self.create_state(consumer=Consumer(self.app)), - handlers=Panel.data, - mailbox=self.app.control.mailbox) - r = panel.dispatch('ping', reply_to={'exchange': 'x', - 'routing_key': 'x'}) - self.assertEqual(r, {'ok': 'pong'}) - self.assertDictEqual(replies[0], {panel.hostname: {'ok': 'pong'}}) - - def test_pool_restart(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - consumer.event_dispatcher = Mock(name='evd') - panel = self.create_panel(consumer=consumer) - assert panel.state.consumer.controller.consumer is consumer - panel.app = self.app - _import = panel.app.loader.import_from_cwd = Mock() - _reload = Mock() - - with self.assertRaises(ValueError): - panel.handle('pool_restart', {'reloader': _reload}) - - self.app.conf.CELERYD_POOL_RESTARTS = True - panel.handle('pool_restart', {'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - consumer.reset_rate_limits.assert_called_with() - consumer.update_strategies.assert_called_with() - self.assertFalse(_reload.called) - self.assertFalse(_import.called) - - def test_pool_restart_import_modules(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - panel = self.create_panel(consumer=consumer) - panel.app = self.app - assert panel.state.consumer.controller.consumer is 
consumer - _import = consumer.controller.app.loader.import_from_cwd = Mock() - _reload = Mock() - - self.app.conf.CELERYD_POOL_RESTARTS = True - panel.handle('pool_restart', {'modules': ['foo', 'bar'], - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - consumer.reset_rate_limits.assert_called_with() - consumer.update_strategies.assert_called_with() - self.assertFalse(_reload.called) - self.assertItemsEqual( - [call('bar'), call('foo')], - _import.call_args_list, - ) - - def test_pool_restart_reload_modules(self): - consumer = Consumer(self.app) - consumer.controller = _WC(app=self.app) - consumer.controller.consumer = consumer - consumer.controller.pool.restart = Mock() - consumer.reset_rate_limits = Mock(name='reset_rate_limits()') - consumer.update_strategies = Mock(name='update_strategies()') - panel = self.create_panel(consumer=consumer) - panel.app = self.app - _import = panel.app.loader.import_from_cwd = Mock() - _reload = Mock() - - self.app.conf.CELERYD_POOL_RESTARTS = True - with patch.dict(sys.modules, {'foo': None}): - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': False, - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - self.assertFalse(_reload.called) - self.assertFalse(_import.called) - - _import.reset_mock() - _reload.reset_mock() - consumer.controller.pool.restart.reset_mock() - - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': True, - 'reloader': _reload}) - - self.assertTrue(consumer.controller.pool.restart.called) - self.assertTrue(_reload.called) - self.assertFalse(_import.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py deleted file mode 100644 index 50559ca..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import - -from celery.worker.heartbeat import Heart -from celery.tests.case import AppCase - - -class MockDispatcher(object): - heart = None - next_iter = 0 - - def __init__(self): - self.sent = [] - self.on_enabled = set() - self.on_disabled = set() - self.enabled = True - - def send(self, msg, **_fields): - self.sent.append(msg) - if self.heart: - if self.next_iter > 10: - self.heart._shutdown.set() - self.next_iter += 1 - - -class MockDispatcherRaising(object): - - def send(self, msg): - if msg == 'worker-offline': - raise Exception('foo') - - -class MockTimer(object): - - def call_repeatedly(self, secs, fun, args=(), kwargs={}): - - class entry(tuple): - canceled = False - - def cancel(self): - self.canceled = True - - return entry((secs, fun, args, kwargs)) - - def cancel(self, entry): - entry.cancel() - - -class test_Heart(AppCase): - - def test_start_stop(self): - timer = MockTimer() - eventer = MockDispatcher() - h = Heart(timer, eventer, interval=1) - h.start() - self.assertTrue(h.tref) - h.stop() - self.assertIsNone(h.tref) - h.stop() - - def test_start_when_disabled(self): - timer = MockTimer() - eventer = MockDispatcher() - eventer.enabled = False - h = Heart(timer, eventer) - h.start() - self.assertFalse(h.tref) - - def test_stop_when_disabled(self): - timer = MockTimer() - eventer = MockDispatcher() - eventer.enabled = False - h = Heart(timer, eventer) - h.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py deleted 
file mode 100644 index e84abf3..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py +++ /dev/null @@ -1,342 +0,0 @@ -from __future__ import absolute_import - -from kombu.async import Hub, READ, WRITE, ERR -from kombu.async.debug import callback_for, repr_flag, _rcb -from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore - -from celery.five import range -from celery.tests.case import Case, Mock, call, patch - - -class File(object): - - def __init__(self, fd): - self.fd = fd - - def fileno(self): - return self.fd - - def __eq__(self, other): - if isinstance(other, File): - return self.fd == other.fd - return NotImplemented - - def __hash__(self): - return hash(self.fd) - - -class test_DummyLock(Case): - - def test_context(self): - mutex = DummyLock() - with mutex: - pass - - -class test_LaxBoundedSemaphore(Case): - - def test_acquire_release(self): - x = LaxBoundedSemaphore(2) - - c1 = Mock() - x.acquire(c1, 1) - self.assertEqual(x.value, 1) - c1.assert_called_with(1) - - c2 = Mock() - x.acquire(c2, 2) - self.assertEqual(x.value, 0) - c2.assert_called_with(2) - - c3 = Mock() - x.acquire(c3, 3) - self.assertEqual(x.value, 0) - self.assertFalse(c3.called) - - x.release() - self.assertEqual(x.value, 0) - x.release() - self.assertEqual(x.value, 1) - x.release() - self.assertEqual(x.value, 2) - c3.assert_called_with(3) - - def test_bounded(self): - x = LaxBoundedSemaphore(2) - for i in range(100): - x.release() - self.assertEqual(x.value, 2) - - def test_grow_shrink(self): - x = LaxBoundedSemaphore(1) - self.assertEqual(x.initial_value, 1) - cb1 = Mock() - x.acquire(cb1, 1) - cb1.assert_called_with(1) - self.assertEqual(x.value, 0) - - cb2 = Mock() - x.acquire(cb2, 2) - self.assertFalse(cb2.called) - self.assertEqual(x.value, 0) - - cb3 = Mock() - x.acquire(cb3, 3) - self.assertFalse(cb3.called) - - x.grow(2) - cb2.assert_called_with(2) - cb3.assert_called_with(3) - self.assertEqual(x.value, 2) - self.assertEqual(x.initial_value, 3) - - self.assertFalse(x._waiting) - x.grow(3) - for i in range(x.initial_value): - self.assertTrue(x.acquire(Mock())) - self.assertFalse(x.acquire(Mock())) - x.clear() - - x.shrink(3) - for i in range(x.initial_value): - self.assertTrue(x.acquire(Mock())) - self.assertFalse(x.acquire(Mock())) - self.assertEqual(x.value, 0) - - for i in range(100): - x.release() - self.assertEqual(x.value, x.initial_value) - - def test_clear(self): - x = LaxBoundedSemaphore(10) - for i in range(11): - x.acquire(Mock()) - self.assertTrue(x._waiting) - self.assertEqual(x.value, 0) - - x.clear() - self.assertFalse(x._waiting) - self.assertEqual(x.value, x.initial_value) - - -class test_Hub(Case): - - def test_repr_flag(self): - self.assertEqual(repr_flag(READ), 'R') - self.assertEqual(repr_flag(WRITE), 'W') - self.assertEqual(repr_flag(ERR), '!') - self.assertEqual(repr_flag(READ | WRITE), 'RW') - self.assertEqual(repr_flag(READ | ERR), 'R!') - self.assertEqual(repr_flag(WRITE | ERR), 'W!') - self.assertEqual(repr_flag(READ | WRITE | ERR), 'RW!') - - def test_repr_callback_rcb(self): - - def f(): - pass - - self.assertEqual(_rcb(f), f.__name__) - self.assertEqual(_rcb('foo'), 'foo') - - @patch('kombu.async.hub.poll') - def test_start_stop(self, poll): - hub = Hub() - poll.assert_called_with() - - poller = hub.poller - hub.stop() - hub.close() - poller.close.assert_called_with() - - def test_fire_timers(self): - hub = Hub() - hub.timer = Mock() - hub.timer._queue = [] - self.assertEqual(hub.fire_timers(min_delay=42.324, - max_delay=32.321), 32.321) 
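For reference, the LaxBoundedSemaphore tests above pin down the contract the worker's event loop relies on: acquire() fires its callback immediately while a slot is free and queues it otherwise; release() first wakes a waiter and otherwise bumps the counter, capped at the initial size, so over-releasing never raises. A minimal sketch of that contract, assuming the kombu 3.0 kombu.async.semaphore module vendored in this tree (callback name illustrative):

    from kombu.async.semaphore import LaxBoundedSemaphore

    def on_ready(holder):
        print('holder %r got a slot' % (holder, ))

    sem = LaxBoundedSemaphore(2)  # two slots
    sem.acquire(on_ready, 1)      # fires immediately, value -> 1
    sem.acquire(on_ready, 2)      # fires immediately, value -> 0
    sem.acquire(on_ready, 3)      # no slot free: callback is queued
    sem.release()                 # wakes the queued holder 3
    for _ in range(10):
        sem.release()             # 'lax': over-release never raises
    assert sem.value == 2         # capped at the initial size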
- - hub.timer._queue = [1] - hub.scheduler = iter([(3.743, None)]) - self.assertEqual(hub.fire_timers(), 3.743) - - e1, e2, e3 = Mock(), Mock(), Mock() - entries = [e1, e2, e3] - - def reset(): - return [m.reset() for m in [e1, e2, e3]] - - def se(): - while 1: - while entries: - yield None, entries.pop() - yield 3.982, None - hub.scheduler = se() - - self.assertEqual(hub.fire_timers(max_timers=10), 3.982) - for E in [e3, e2, e1]: - E.assert_called_with() - reset() - - entries[:] = [Mock() for _ in range(11)] - keep = list(entries) - self.assertEqual(hub.fire_timers(max_timers=10, min_delay=1.13), 1.13) - for E in reversed(keep[1:]): - E.assert_called_with() - reset() - self.assertEqual(hub.fire_timers(max_timers=10), 3.982) - keep[0].assert_called_with() - - def test_fire_timers_raises(self): - hub = Hub() - eback = Mock() - eback.side_effect = KeyError('foo') - hub.timer = Mock() - hub.scheduler = iter([(0, eback)]) - with self.assertRaises(KeyError): - hub.fire_timers(propagate=(KeyError, )) - - eback.side_effect = ValueError('foo') - hub.scheduler = iter([(0, eback)]) - with patch('kombu.async.hub.logger') as logger: - with self.assertRaises(StopIteration): - hub.fire_timers() - self.assertTrue(logger.error.called) - - def test_add_raises_ValueError(self): - hub = Hub() - hub.poller = Mock(name='hub.poller') - hub.poller.register.side_effect = ValueError() - hub._discard = Mock(name='hub.discard') - with self.assertRaises(ValueError): - hub.add(2, Mock(), READ) - hub._discard.assert_called_with(2) - - def test_repr_active(self): - hub = Hub() - hub.readers = {1: Mock(), 2: Mock()} - hub.writers = {3: Mock(), 4: Mock()} - for value in list(hub.readers.values()) + list(hub.writers.values()): - value.__name__ = 'mock' - self.assertTrue(hub.repr_active()) - - def test_repr_events(self): - hub = Hub() - hub.readers = {6: Mock(), 7: Mock(), 8: Mock()} - hub.writers = {9: Mock()} - for value in list(hub.readers.values()) + list(hub.writers.values()): - value.__name__ = 'mock' - self.assertTrue(hub.repr_events([ - (6, READ), - (7, ERR), - (8, READ | ERR), - (9, WRITE), - (10, 13213), - ])) - - def test_callback_for(self): - hub = Hub() - reader, writer = Mock(), Mock() - hub.readers = {6: reader} - hub.writers = {7: writer} - - self.assertEqual(callback_for(hub, 6, READ), reader) - self.assertEqual(callback_for(hub, 7, WRITE), writer) - with self.assertRaises(KeyError): - callback_for(hub, 6, WRITE) - self.assertEqual(callback_for(hub, 6, WRITE, 'foo'), 'foo') - - def test_add_remove_readers(self): - hub = Hub() - P = hub.poller = Mock() - - read_A = Mock() - read_B = Mock() - hub.add_reader(10, read_A, 10) - hub.add_reader(File(11), read_B, 11) - - P.register.assert_has_calls([ - call(10, hub.READ | hub.ERR), - call(11, hub.READ | hub.ERR), - ], any_order=True) - - self.assertEqual(hub.readers[10], (read_A, (10, ))) - self.assertEqual(hub.readers[11], (read_B, (11, ))) - - hub.remove(10) - self.assertNotIn(10, hub.readers) - hub.remove(File(11)) - self.assertNotIn(11, hub.readers) - P.unregister.assert_has_calls([ - call(10), call(11), - ]) - - def test_can_remove_unknown_fds(self): - hub = Hub() - hub.poller = Mock() - hub.remove(30) - hub.remove(File(301)) - - def test_remove__unregister_raises(self): - hub = Hub() - hub.poller = Mock() - hub.poller.unregister.side_effect = OSError() - - hub.remove(313) - - def test_add_writers(self): - hub = Hub() - P = hub.poller = Mock() - - write_A = Mock() - write_B = Mock() - hub.add_writer(20, write_A) - hub.add_writer(File(21), write_B) - - 
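The reader/writer tests here document the Hub registry API: fds may be plain ints or any object with fileno(), each callback is stored together with its partial arguments, and remove() unregisters from the poller while tolerating unknown fds. In sketch form against kombu 3.0's kombu.async.Hub (the socket only supplies a real fd; names are illustrative):

    import socket

    from kombu.async import Hub

    def on_readable(fd):
        print('fd %r became readable' % (fd, ))

    hub = Hub()
    sock = socket.socket()

    # fds may be ints or objects exposing fileno()
    hub.add_reader(sock, on_readable, sock.fileno())
    assert hub.readers[sock.fileno()] == (on_readable, (sock.fileno(), ))

    hub.remove(sock)   # unregisters from the poller
    hub.remove(31337)  # unknown fds are silently ignored
    hub.close()
    sock.close()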
P.register.assert_has_calls([ - call(20, hub.WRITE), - call(21, hub.WRITE), - ], any_order=True) - - self.assertEqual(hub.writers[20], (write_A, ())) - self.assertEqual(hub.writers[21], (write_B, ())) - - hub.remove(20) - self.assertNotIn(20, hub.writers) - hub.remove(File(21)) - self.assertNotIn(21, hub.writers) - P.unregister.assert_has_calls([ - call(20), call(21), - ]) - - def test_enter__exit(self): - hub = Hub() - P = hub.poller = Mock() - on_close = Mock() - hub.on_close.add(on_close) - - try: - read_A = Mock() - read_B = Mock() - hub.add_reader(10, read_A) - hub.add_reader(File(11), read_B) - write_A = Mock() - write_B = Mock() - hub.add_writer(20, write_A) - hub.add_writer(File(21), write_B) - self.assertTrue(hub.readers) - self.assertTrue(hub.writers) - finally: - assert hub.poller - hub.close() - self.assertFalse(hub.readers) - self.assertFalse(hub.writers) - - P.unregister.assert_has_calls([ - call(10), call(11), call(20), call(21), - ], any_order=True) - - on_close.assert_called_with(hub) - - def test_scheduler_property(self): - hub = Hub(timer=[1, 2, 3]) - self.assertEqual(list(hub.scheduler), [1, 2, 3]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py deleted file mode 100644 index be8d3a1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py +++ /dev/null @@ -1,425 +0,0 @@ -from __future__ import absolute_import - -import socket - -from kombu.async import Hub, READ, WRITE, ERR - -from celery.bootsteps import CLOSE, RUN -from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate -from celery.five import Empty -from celery.worker import state -from celery.worker.consumer import Consumer -from celery.worker.loops import asynloop, synloop - -from celery.tests.case import AppCase, Mock, body_from_sig - - -class X(object): - - def __init__(self, app, heartbeat=None, on_task_message=None, - transport_driver_type=None): - hub = Hub() - ( - self.obj, - self.connection, - self.consumer, - self.blueprint, - self.hub, - self.qos, - self.heartbeat, - self.clock, - ) = self.args = [Mock(name='obj'), - Mock(name='connection'), - Mock(name='consumer'), - Mock(name='blueprint'), - hub, - Mock(name='qos'), - heartbeat, - Mock(name='clock')] - self.connection.supports_heartbeats = True - self.connection.get_heartbeat_interval.side_effect = ( - lambda: self.heartbeat - ) - self.consumer.callbacks = [] - self.obj.strategies = {} - self.connection.connection_errors = (socket.error, ) - if transport_driver_type: - self.connection.transport.driver_type = transport_driver_type - self.hub.readers = {} - self.hub.writers = {} - self.hub.consolidate = set() - self.hub.timer = Mock(name='hub.timer') - self.hub.timer._queue = [Mock()] - self.hub.fire_timers = Mock(name='hub.fire_timers') - self.hub.fire_timers.return_value = 1.7 - self.hub.poller = Mock(name='hub.poller') - self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close - self.Hub = self.hub - self.blueprint.state = RUN - # need this for create_task_handler - _consumer = Consumer(Mock(), timer=Mock(), app=app) - _consumer.on_task_message = on_task_message or [] - self.obj.create_task_handler = _consumer.create_task_handler - self.on_unknown_message = self.obj.on_unknown_message = Mock( - name='on_unknown_message', - ) - _consumer.on_unknown_message = self.on_unknown_message - self.on_unknown_task = self.obj.on_unknown_task = Mock( - name='on_unknown_task', - ) - 
_consumer.on_unknown_task = self.on_unknown_task - self.on_invalid_task = self.obj.on_invalid_task = Mock( - name='on_invalid_task', - ) - _consumer.on_invalid_task = self.on_invalid_task - _consumer.strategies = self.obj.strategies - - def timeout_then_error(self, mock): - - def first(*args, **kwargs): - mock.side_effect = socket.error() - self.connection.more_to_read = False - raise socket.timeout() - mock.side_effect = first - - def close_then_error(self, mock=None, mod=0, exc=None): - mock = Mock() if mock is None else mock - - def first(*args, **kwargs): - if not mod or mock.call_count > mod: - self.close() - self.connection.more_to_read = False - raise (socket.error() if exc is None else exc) - mock.side_effect = first - return mock - - def close(self, *args, **kwargs): - self.blueprint.state = CLOSE - - def closer(self, mock=None, mod=0): - mock = Mock() if mock is None else mock - - def closing(*args, **kwargs): - if not mod or mock.call_count >= mod: - self.close() - mock.side_effect = closing - return mock - - -def get_task_callback(*args, **kwargs): - x = X(*args, **kwargs) - x.blueprint.state = CLOSE - asynloop(*x.args) - return x, x.consumer.callbacks[0] - - -class test_asynloop(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y): - return x + y - self.add = add - - def test_drain_after_consume(self): - x, _ = get_task_callback(self.app, transport_driver_type='amqp') - self.assertIn( - x.connection.drain_events, [p.fun for p in x.hub._ready], - ) - - def test_setup_heartbeat(self): - x = X(self.app, heartbeat=10) - x.hub.call_repeatedly = Mock(name='x.hub.call_repeatedly()') - x.blueprint.state = CLOSE - asynloop(*x.args) - x.consumer.consume.assert_called_with() - x.obj.on_ready.assert_called_with() - x.hub.call_repeatedly.assert_called_with( - 10 / 2.0, x.connection.heartbeat_check, 2.0, - ) - - def task_context(self, sig, **kwargs): - x, on_task = get_task_callback(self.app, **kwargs) - body = body_from_sig(self.app, sig) - message = Mock() - strategy = x.obj.strategies[sig.task] = Mock() - return x, on_task, body, message, strategy - - def test_on_task_received(self): - _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - on_task(body, msg) - strategy.assert_called_with( - msg, body, msg.ack_log_error, msg.reject_log_error, [], - ) - - def test_on_task_received_executes_on_task_message(self): - cbs = [Mock(), Mock(), Mock()] - _, on_task, body, msg, strategy = self.task_context( - self.add.s(2, 2), on_task_message=cbs, - ) - on_task(body, msg) - strategy.assert_called_with( - msg, body, msg.ack_log_error, msg.reject_log_error, cbs, - ) - - def test_on_task_message_missing_name(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - body.pop('task') - on_task(body, msg) - x.on_unknown_message.assert_called_with(body, msg) - - def test_on_task_not_registered(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - exc = strategy.side_effect = KeyError(self.add.name) - on_task(body, msg) - x.on_unknown_task.assert_called_with(body, msg, exc) - - def test_on_task_InvalidTaskError(self): - x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) - exc = strategy.side_effect = InvalidTaskError() - on_task(body, msg) - x.on_invalid_task.assert_called_with(body, msg, exc) - - def test_should_terminate(self): - x = X(self.app) - # XXX why aren't the errors propagated?!? 
- state.should_terminate = True - try: - with self.assertRaises(WorkerTerminate): - asynloop(*x.args) - finally: - state.should_terminate = False - - def test_should_terminate_hub_close_raises(self): - x = X(self.app) - # XXX why aren't the errors propagated?!? - state.should_terminate = True - x.hub.close.side_effect = MemoryError() - try: - with self.assertRaises(WorkerTerminate): - asynloop(*x.args) - finally: - state.should_terminate = False - - def test_should_stop(self): - x = X(self.app) - state.should_stop = True - try: - with self.assertRaises(WorkerShutdown): - asynloop(*x.args) - finally: - state.should_stop = False - - def test_updates_qos(self): - x = X(self.app) - x.qos.prev = 3 - x.qos.value = 3 - x.hub.on_tick.add(x.closer(mod=2)) - x.hub.timer._queue = [1] - asynloop(*x.args) - self.assertFalse(x.qos.update.called) - - x = X(self.app) - x.qos.prev = 1 - x.qos.value = 6 - x.hub.on_tick.add(x.closer(mod=2)) - asynloop(*x.args) - x.qos.update.assert_called_with() - x.hub.fire_timers.assert_called_with(propagate=(socket.error, )) - - def test_poll_empty(self): - x = X(self.app) - x.hub.readers = {6: Mock()} - x.hub.timer._queue = [1] - x.close_then_error(x.hub.poller.poll) - x.hub.fire_timers.return_value = 33.37 - poller = x.hub.poller - poller.poll.return_value = [] - with self.assertRaises(socket.error): - asynloop(*x.args) - poller.poll.assert_called_with(33.37) - - def test_poll_readable(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) - poller = x.hub.poller - poller.poll.return_value = [(6, READ)] - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_readable_raises_Empty(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, READ)] - reader.side_effect = Empty() - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_writable(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - writer.assert_called_with(6) - self.assertTrue(poller.poll.called) - - def test_poll_writable_none_registered(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(7, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_unknown_event(self): - x = X(self.app) - writer = Mock(name='reader') - x.hub.add_writer(6, writer, 6) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, 0)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_keep_draining_disabled(self): - x = X(self.app) - x.hub.writers = {6: Mock()} - poll = x.hub.poller.poll - - def se(*args, **kwargs): - poll.side_effect = socket.error() - poll.side_effect = se - - poller = x.hub.poller - poll.return_value = [(6, 0)] - with 
self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - def test_poll_err_writable(self): - x = X(self.app) - writer = Mock(name='writer') - x.hub.add_writer(6, writer, 6, 48) - x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, ERR)] - with self.assertRaises(socket.error): - asynloop(*x.args) - writer.assert_called_with(6, 48) - self.assertTrue(poller.poll.called) - - def test_poll_write_generator(self): - x = X(self.app) - x.hub.remove = Mock(name='hub.remove()') - - def Gen(): - yield 1 - yield 2 - gen = Gen() - - x.hub.add_writer(6, gen) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertTrue(gen.gi_frame.f_lasti != -1) - self.assertFalse(x.hub.remove.called) - - def test_poll_write_generator_stopped(self): - x = X(self.app) - - def Gen(): - raise StopIteration() - yield - gen = Gen() - x.hub.add_writer(6, gen) - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - x.hub.remove = Mock(name='hub.remove()') - with self.assertRaises(socket.error): - asynloop(*x.args) - self.assertIsNone(gen.gi_frame) - - def test_poll_write_generator_raises(self): - x = X(self.app) - - def Gen(): - raise ValueError('foo') - yield - gen = Gen() - x.hub.add_writer(6, gen) - x.hub.remove = Mock(name='hub.remove()') - x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) - x.hub.poller.poll.return_value = [(6, WRITE)] - with self.assertRaises(ValueError): - asynloop(*x.args) - self.assertIsNone(gen.gi_frame) - x.hub.remove.assert_called_with(6) - - def test_poll_err_readable(self): - x = X(self.app) - reader = Mock(name='reader') - x.hub.add_reader(6, reader, 6, 24) - x.hub.on_tick.add(x.close_then_error(Mock(), 2)) - poller = x.hub.poller - poller.poll.return_value = [(6, ERR)] - with self.assertRaises(socket.error): - asynloop(*x.args) - reader.assert_called_with(6, 24) - self.assertTrue(poller.poll.called) - - def test_poll_raises_ValueError(self): - x = X(self.app) - x.hub.readers = {6: Mock()} - poller = x.hub.poller - x.close_then_error(poller.poll, exc=ValueError) - asynloop(*x.args) - self.assertTrue(poller.poll.called) - - -class test_synloop(AppCase): - - def test_timeout_ignored(self): - x = X(self.app) - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - self.assertEqual(x.connection.drain_events.call_count, 2) - - def test_updates_qos_when_changed(self): - x = X(self.app) - x.qos.prev = 2 - x.qos.value = 2 - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - self.assertFalse(x.qos.update.called) - - x.qos.value = 4 - x.timeout_then_error(x.connection.drain_events) - with self.assertRaises(socket.error): - synloop(*x.args) - x.qos.update.assert_called_with() - - def test_ignores_socket_errors_when_closed(self): - x = X(self.app) - x.close_then_error(x.connection.drain_events) - self.assertIsNone(synloop(*x.args)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py deleted file mode 100644 index 16efcd7..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py +++ /dev/null @@ -1,969 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, 
unicode_literals - -import anyjson -import os -import signal -import socket -import sys - -from datetime import datetime, timedelta - -from billiard.einfo import ExceptionInfo -from kombu.transport.base import Message -from kombu.utils.encoding import from_utf8, default_encode - -from celery import states -from celery.app.trace import ( - trace_task, - _trace_task_ret, - TraceInfo, - mro_lookup, - build_tracer, - setup_worker_optimizations, - reset_worker_optimizations, -) -from celery.concurrency.base import BasePool -from celery.exceptions import ( - Ignore, - InvalidTaskError, - Retry, - TaskRevokedError, - Terminated, - WorkerLostError, -) -from celery.five import keys, monotonic -from celery.signals import task_revoked -from celery.utils import uuid -from celery.worker import job as module -from celery.worker.job import Request, logger as req_logger -from celery.worker.state import revoked - -from celery.tests.case import ( - AppCase, - Case, - Mock, - SkipTest, - assert_signal_called, - body_from_sig, - patch, -) - - -class test_mro_lookup(Case): - - def test_order(self): - - class A(object): - pass - - class B(A): - pass - - class C(B): - pass - - class D(C): - - @classmethod - def mro(cls): - return () - - A.x = 10 - self.assertEqual(mro_lookup(C, 'x'), A) - self.assertIsNone(mro_lookup(C, 'x', stop=(A, ))) - B.x = 10 - self.assertEqual(mro_lookup(C, 'x'), B) - C.x = 10 - self.assertEqual(mro_lookup(C, 'x'), C) - self.assertIsNone(mro_lookup(D, 'x')) - - -def jail(app, task_id, name, args, kwargs): - request = {'id': task_id} - task = app.tasks[name] - task.__trace__ = None # rebuild - return trace_task( - task, task_id, args, kwargs, request=request, eager=False, app=app, - ) - - -class test_default_encode(AppCase): - - def setup(self): - if sys.version_info >= (3, 0): - raise SkipTest('py3k: not relevant') - - def test_jython(self): - prev, sys.platform = sys.platform, 'java 1.6.1' - try: - self.assertEqual(default_encode(bytes('foo')), 'foo') - finally: - sys.platform = prev - - def test_cpython(self): - prev, sys.platform = sys.platform, 'darwin' - gfe, sys.getfilesystemencoding = ( - sys.getfilesystemencoding, - lambda: 'utf-8', - ) - try: - self.assertEqual(default_encode(bytes('foo')), 'foo') - finally: - sys.platform = prev - sys.getfilesystemencoding = gfe - - -class test_Retry(AppCase): - - def test_retry_semipredicate(self): - try: - raise Exception('foo') - except Exception as exc: - ret = Retry('Retrying task', exc) - self.assertEqual(ret.exc, exc) - - -class test_trace_task(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def mytask(i, **kwargs): - return i ** i - self.mytask = mytask - - @self.app.task(shared=False) - def mytask_raising(i): - raise KeyError(i) - self.mytask_raising = mytask_raising - - @patch('celery.app.trace._logger') - def test_process_cleanup_fails(self, _logger): - self.mytask.backend = Mock() - self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) - tid = uuid() - ret = jail(self.app, tid, self.mytask.name, [2], {}) - self.assertEqual(ret, 4) - self.assertTrue(self.mytask.backend.store_result.called) - self.assertIn('Process cleanup failed', _logger.error.call_args[0][0]) - - def test_process_cleanup_BaseException(self): - self.mytask.backend = Mock() - self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) - with self.assertRaises(SystemExit): - jail(self.app, uuid(), self.mytask.name, [2], {}) - - def test_execute_jail_success(self): - ret = jail(self.app, uuid(), self.mytask.name, [2], {}) - 
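The jail() helper above is the tracing story in miniature: clear the task's cached tracer, then call trace_task() with an explicit request dict; the return value is the task's result on success and an ExceptionInfo on failure. A condensed standalone version under the celery 3.1 API (app name, task body and the in-memory result backend are illustrative choices):

    from celery import Celery
    from celery.app.trace import trace_task
    from celery.utils import uuid

    app = Celery('example', backend='cache+memory://', set_as_current=False)

    @app.task(shared=False)
    def square(i):
        return i * i

    task_id = uuid()
    square.__trace__ = None  # force the tracer to be rebuilt
    ret = trace_task(square, task_id, (4, ), {},
                     request={'id': task_id}, eager=False, app=app)
    print(ret)               # 16 on success, ExceptionInfo on failure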
self.assertEqual(ret, 4) - - def test_marked_as_started(self): - _started = [] - - def store_result(tid, meta, state, **kwars): - if state == states.STARTED: - _started.append(tid) - self.mytask.backend.store_result = Mock(name='store_result') - self.mytask.backend.store_result.side_effect = store_result - self.mytask.track_started = True - - tid = uuid() - jail(self.app, tid, self.mytask.name, [2], {}) - self.assertIn(tid, _started) - - self.mytask.ignore_result = True - tid = uuid() - jail(self.app, tid, self.mytask.name, [2], {}) - self.assertNotIn(tid, _started) - - def test_execute_jail_failure(self): - ret = jail( - self.app, uuid(), self.mytask_raising.name, [4], {}, - ) - self.assertIsInstance(ret, ExceptionInfo) - self.assertTupleEqual(ret.exception.args, (4, )) - - def test_execute_ignore_result(self): - - @self.app.task(shared=False, ignore_result=True) - def ignores_result(i): - return i ** i - - task_id = uuid() - ret = jail(self.app, task_id, ignores_result.name, [4], {}) - self.assertEqual(ret, 256) - self.assertFalse(self.app.AsyncResult(task_id).ready()) - - -class MockEventDispatcher(object): - - def __init__(self): - self.sent = [] - self.enabled = True - - def send(self, event, **fields): - self.sent.append(event) - - -class test_Request(AppCase): - - def setup(self): - - @self.app.task(shared=False) - def add(x, y, **kw_): - return x + y - self.add = add - - @self.app.task(shared=False) - def mytask(i, **kwargs): - return i ** i - self.mytask = mytask - - @self.app.task(shared=False) - def mytask_raising(i): - raise KeyError(i) - self.mytask_raising = mytask_raising - - def get_request(self, sig, Request=Request, **kwargs): - return Request( - body_from_sig(self.app, sig), - on_ack=Mock(), - eventer=Mock(), - app=self.app, - connection_errors=(socket.error, ), - task=sig.type, - **kwargs - ) - - def test_invalid_eta_raises_InvalidTaskError(self): - with self.assertRaises(InvalidTaskError): - self.get_request(self.add.s(2, 2).set(eta='12345')) - - def test_invalid_expires_raises_InvalidTaskError(self): - with self.assertRaises(InvalidTaskError): - self.get_request(self.add.s(2, 2).set(expires='12345')) - - def test_valid_expires_with_utc_makes_aware(self): - with patch('celery.worker.job.maybe_make_aware') as mma: - self.get_request(self.add.s(2, 2).set(expires=10)) - self.assertTrue(mma.called) - - def test_maybe_expire_when_expires_is_None(self): - req = self.get_request(self.add.s(2, 2)) - self.assertFalse(req.maybe_expire()) - - def test_on_retry_acks_if_late(self): - self.add.acks_late = True - req = self.get_request(self.add.s(2, 2)) - req.on_retry(Mock()) - req.on_ack.assert_called_with(req_logger, req.connection_errors) - - def test_on_failure_Termianted(self): - einfo = None - try: - raise Terminated('9') - except Terminated: - einfo = ExceptionInfo() - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - req.on_failure(einfo) - req.eventer.send.assert_called_with( - 'task-revoked', - uuid=req.id, terminated=True, signum='9', expired=False, - ) - - def test_log_error_propagates_MemoryError(self): - einfo = None - try: - raise MemoryError() - except MemoryError: - einfo = ExceptionInfo(internal=True) - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - with self.assertRaises(MemoryError): - req._log_error(einfo) - - def test_log_error_when_Ignore(self): - einfo = None - try: - raise Ignore() - except Ignore: - einfo = ExceptionInfo(internal=True) - self.assertIsNotNone(einfo) - req = self.get_request(self.add.s(2, 2)) - 
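test_log_error_when_Ignore checks the worker half of the Ignore contract: the message is acked and nothing is logged as an error. The task half is simply raising celery.exceptions.Ignore, roughly like this (task name and body illustrative):

    from celery import Celery
    from celery.exceptions import Ignore

    app = Celery('example', set_as_current=False)

    @app.task(shared=False, bind=True)
    def maybe_skip(self, x):
        if x < 0:
            # acknowledge the message but record neither success nor failure
            raise Ignore()
        return x * 2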
req._log_error(einfo) - req.on_ack.assert_called_with(req_logger, req.connection_errors) - - def test_tzlocal_is_cached(self): - req = self.get_request(self.add.s(2, 2)) - req._tzlocal = 'foo' - self.assertEqual(req.tzlocal, 'foo') - - def test_execute_magic_kwargs(self): - task = self.add.s(2, 2) - task.freeze() - req = self.get_request(task) - self.add.accept_magic_kwargs = True - pool = Mock() - req.execute_using_pool(pool) - self.assertTrue(pool.apply_async.called) - args = pool.apply_async.call_args[1]['args'] - self.assertEqual(args[0], task.task) - self.assertEqual(args[1], task.id) - self.assertEqual(args[2], task.args) - kwargs = args[3] - self.assertEqual(kwargs.get('task_name'), task.task) - - def xRequest(self, body=None, **kwargs): - body = dict({'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}}, **body or {}) - return Request(body, app=self.app, **kwargs) - - def test_task_wrapper_repr(self): - self.assertTrue(repr(self.xRequest())) - - @patch('celery.worker.job.kwdict') - def test_kwdict(self, kwdict): - prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True - try: - self.xRequest() - self.assertTrue(kwdict.called) - finally: - module.NEEDS_KWDICT = prev - - def test_sets_store_errors(self): - self.mytask.ignore_result = True - job = self.xRequest() - self.assertFalse(job.store_errors) - - self.mytask.store_errors_even_if_ignored = True - job = self.xRequest() - self.assertTrue(job.store_errors) - - def test_send_event(self): - job = self.xRequest() - job.eventer = MockEventDispatcher() - job.send_event('task-frobulated') - self.assertIn('task-frobulated', job.eventer.sent) - - def test_send_events__disabled_at_task_level(self): - job = self.xRequest() - job.task.send_events = False - job.eventer = Mock(name='.eventer') - job.send_event('task-frobulated') - job.eventer.send.assert_not_called() - - def test_on_retry(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - job.eventer = MockEventDispatcher() - try: - raise Retry('foo', KeyError('moofoobar')) - except: - einfo = ExceptionInfo() - job.on_failure(einfo) - self.assertIn('task-retried', job.eventer.sent) - prev, module._does_info = module._does_info, False - try: - job.on_failure(einfo) - finally: - module._does_info = prev - einfo.internal = True - job.on_failure(einfo) - - def test_compat_properties(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - self.assertEqual(job.task_id, job.id) - self.assertEqual(job.task_name, job.name) - job.task_id = 'ID' - self.assertEqual(job.id, 'ID') - job.task_name = 'NAME' - self.assertEqual(job.name, 'NAME') - - def test_terminate__task_started(self): - pool = Mock() - signum = signal.SIGTERM - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwrgs': {'f': 'x'}, - }, app=self.app) - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=True, expired=False, signum=signum): - job.time_start = monotonic() - job.worker_pid = 313 - job.terminate(pool, signal='TERM') - pool.terminate_job.assert_called_with(job.worker_pid, signum) - - def test_terminate__task_reserved(self): - pool = Mock() - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - }, app=self.app) - job.time_start = None - job.terminate(pool, signal='TERM') - self.assertFalse(pool.terminate_job.called) - 
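The two terminate tests around this point split revocation into its worker-side cases: a task that already started is killed through the pool at once, while a merely reserved one stores the kill in _terminate_on_ack and replays it from on_accepted. On the client side both cases are triggered by the same broadcast; a sketch using the public control API (broker URL and task id are illustrative):

    from celery import Celery

    app = Celery('example', broker='amqp://guest@localhost//')

    # revoke by id; terminate=True additionally signals the worker
    # process of a task that has already started
    app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
                       terminate=True, signal='SIGTERM')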
self.assertTupleEqual(job._terminate_on_ack, (pool, 15)) - job.terminate(pool, signal='TERM') - - def test_revoked_expires_expired(self): - job = Request({ - 'task': self.mytask.name, - 'id': uuid(), - 'args': [1], - 'kwargs': {'f': 'x'}, - 'expires': datetime.utcnow() - timedelta(days=1), - }, app=self.app) - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=False, expired=True, signum=None): - job.revoked() - self.assertIn(job.id, revoked) - self.assertEqual( - self.mytask.backend.get_status(job.id), - states.REVOKED, - ) - - def test_revoked_expires_not_expired(self): - job = self.xRequest({ - 'expires': datetime.utcnow() + timedelta(days=1), - }) - job.revoked() - self.assertNotIn(job.id, revoked) - self.assertNotEqual( - self.mytask.backend.get_status(job.id), - states.REVOKED, - ) - - def test_revoked_expires_ignore_result(self): - self.mytask.ignore_result = True - job = self.xRequest({ - 'expires': datetime.utcnow() - timedelta(days=1), - }) - job.revoked() - self.assertIn(job.id, revoked) - self.assertNotEqual( - self.mytask.backend.get_status(job.id), states.REVOKED, - ) - - def test_send_email(self): - app = self.app - mail_sent = [False] - - def mock_mail_admins(*args, **kwargs): - mail_sent[0] = True - - def get_ei(): - try: - raise KeyError('moofoobar') - except: - return ExceptionInfo() - - app.mail_admins = mock_mail_admins - self.mytask.send_error_emails = True - job = self.xRequest() - einfo = get_ei() - job.on_failure(einfo) - self.assertTrue(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - self.mytask.send_error_emails = False - job.on_failure(einfo) - self.assertFalse(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - self.mytask.send_error_emails = True - job.on_failure(einfo) - self.assertTrue(mail_sent[0]) - - def test_already_revoked(self): - job = self.xRequest() - job._already_revoked = True - self.assertTrue(job.revoked()) - - def test_revoked(self): - job = self.xRequest() - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=False, expired=False, signum=None): - revoked.add(job.id) - self.assertTrue(job.revoked()) - self.assertTrue(job._already_revoked) - self.assertTrue(job.acknowledged) - - def test_execute_does_not_execute_revoked(self): - job = self.xRequest() - revoked.add(job.id) - job.execute() - - def test_execute_acks_late(self): - self.mytask_raising.acks_late = True - job = self.xRequest({ - 'task': self.mytask_raising.name, - 'kwargs': {}, - }) - job.execute() - self.assertTrue(job.acknowledged) - job.execute() - - def test_execute_using_pool_does_not_execute_revoked(self): - job = self.xRequest() - revoked.add(job.id) - with self.assertRaises(TaskRevokedError): - job.execute_using_pool(None) - - def test_on_accepted_acks_early(self): - job = self.xRequest() - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - self.assertTrue(job.acknowledged) - prev, module._does_debug = module._does_debug, False - try: - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - finally: - module._does_debug = prev - - def test_on_accepted_acks_late(self): - job = self.xRequest() - self.mytask.acks_late = True - job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) - self.assertFalse(job.acknowledged) - - def test_on_accepted_terminates(self): - signum = signal.SIGTERM - pool = Mock() - job = self.xRequest() - with assert_signal_called( - task_revoked, sender=job.task, request=job, - terminated=True, expired=False, signum=signum): - job.terminate(pool, 
signal='TERM') - self.assertFalse(pool.terminate_job.call_count) - job.on_accepted(pid=314, time_accepted=monotonic()) - pool.terminate_job.assert_called_with(314, signum) - - def test_on_success_acks_early(self): - job = self.xRequest() - job.time_start = 1 - job.on_success(42) - prev, module._does_info = module._does_info, False - try: - job.on_success(42) - self.assertFalse(job.acknowledged) - finally: - module._does_info = prev - - def test_on_success_BaseException(self): - job = self.xRequest() - job.time_start = 1 - with self.assertRaises(SystemExit): - try: - raise SystemExit() - except SystemExit: - job.on_success(ExceptionInfo()) - else: - assert False - - def test_on_success_eventer(self): - job = self.xRequest() - job.time_start = 1 - job.eventer = Mock() - job.eventer.send = Mock() - job.on_success(42) - self.assertTrue(job.eventer.send.called) - - def test_on_success_when_failure(self): - job = self.xRequest() - job.time_start = 1 - job.on_failure = Mock() - try: - raise KeyError('foo') - except Exception: - job.on_success(ExceptionInfo()) - self.assertTrue(job.on_failure.called) - - def test_on_success_acks_late(self): - job = self.xRequest() - job.time_start = 1 - self.mytask.acks_late = True - job.on_success(42) - self.assertTrue(job.acknowledged) - - def test_on_failure_WorkerLostError(self): - - def get_ei(): - try: - raise WorkerLostError('do re mi') - except WorkerLostError: - return ExceptionInfo() - - job = self.xRequest() - exc_info = get_ei() - job.on_failure(exc_info) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.FAILURE, - ) - - self.mytask.ignore_result = True - exc_info = get_ei() - job = self.xRequest() - job.on_failure(exc_info) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.PENDING, - ) - - def test_on_failure_acks_late(self): - job = self.xRequest() - job.time_start = 1 - self.mytask.acks_late = True - try: - raise KeyError('foo') - except KeyError: - exc_info = ExceptionInfo() - job.on_failure(exc_info) - self.assertTrue(job.acknowledged) - - def test_from_message_invalid_kwargs(self): - body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo') - with self.assertRaises(InvalidTaskError): - Request(body, message=None, app=self.app) - - @patch('celery.worker.job.error') - @patch('celery.worker.job.warn') - def test_on_timeout(self, warn, error): - - job = self.xRequest() - job.on_timeout(soft=True, timeout=1337) - self.assertIn('Soft time limit', warn.call_args[0][0]) - job.on_timeout(soft=False, timeout=1337) - self.assertIn('Hard time limit', error.call_args[0][0]) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.FAILURE, - ) - - self.mytask.ignore_result = True - job = self.xRequest() - job.on_timeout(soft=True, timeout=1336) - self.assertEqual( - self.mytask.backend.get_status(job.id), states.PENDING, - ) - - def test_fast_trace_task(self): - from celery.app import trace - setup_worker_optimizations(self.app) - self.assertIs(trace.trace_task_ret, trace._fast_trace_task) - try: - self.mytask.__trace__ = build_tracer( - self.mytask.name, self.mytask, self.app.loader, 'test', - app=self.app, - ) - res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {}) - self.assertEqual(res, 4 ** 4) - finally: - reset_worker_optimizations() - self.assertIs(trace.trace_task_ret, trace._trace_task_ret) - delattr(self.mytask, '__trace__') - res = trace.trace_task_ret( - self.mytask.name, uuid(), [4], {}, app=self.app, - ) - self.assertEqual(res, 4 ** 4) - - def test_trace_task_ret(self): - 
self.mytask.__trace__ = build_tracer( - self.mytask.name, self.mytask, self.app.loader, 'test', - app=self.app, - ) - res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) - self.assertEqual(res, 4 ** 4) - - def test_trace_task_ret__no_trace(self): - try: - delattr(self.mytask, '__trace__') - except AttributeError: - pass - res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) - self.assertEqual(res, 4 ** 4) - - def test_trace_catches_exception(self): - - def _error_exec(self, *args, **kwargs): - raise KeyError('baz') - - @self.app.task(request=None, shared=False) - def raising(): - raise KeyError('baz') - - with self.assertWarnsRegex(RuntimeWarning, - r'Exception raised outside'): - res = trace_task(raising, uuid(), [], {}, app=self.app) - self.assertIsInstance(res, ExceptionInfo) - - def test_worker_task_trace_handle_retry(self): - tid = uuid() - self.mytask.push_request(id=tid) - try: - raise ValueError('foo') - except Exception as exc: - try: - raise Retry(str(exc), exc=exc) - except Retry as exc: - w = TraceInfo(states.RETRY, exc) - w.handle_retry(self.mytask, store_errors=False) - self.assertEqual( - self.mytask.backend.get_status(tid), states.PENDING, - ) - w.handle_retry(self.mytask, store_errors=True) - self.assertEqual( - self.mytask.backend.get_status(tid), states.RETRY, - ) - finally: - self.mytask.pop_request() - - def test_worker_task_trace_handle_failure(self): - tid = uuid() - self.mytask.push_request() - try: - self.mytask.request.id = tid - try: - raise ValueError('foo') - except Exception as exc: - w = TraceInfo(states.FAILURE, exc) - w.handle_failure(self.mytask, store_errors=False) - self.assertEqual( - self.mytask.backend.get_status(tid), states.PENDING, - ) - w.handle_failure(self.mytask, store_errors=True) - self.assertEqual( - self.mytask.backend.get_status(tid), states.FAILURE, - ) - finally: - self.mytask.pop_request() - - def test_task_wrapper_mail_attrs(self): - job = self.xRequest({'args': [], 'kwargs': {}}) - x = job.success_msg % { - 'name': job.name, - 'id': job.id, - 'return_value': 10, - 'runtime': 0.3641, - } - self.assertTrue(x) - x = job.error_msg % { - 'name': job.name, - 'id': job.id, - 'exc': 'FOOBARBAZ', - 'description': 'raised unexpected', - 'traceback': 'foobarbaz', - } - self.assertTrue(x) - - def test_from_message(self): - us = 'æØåveéðƒeæ' - body = {'task': self.mytask.name, 'id': uuid(), - 'args': [2], 'kwargs': {us: 'bar'}} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - job = Request(m.decode(), message=m, app=self.app) - self.assertIsInstance(job, Request) - self.assertEqual(job.name, body['task']) - self.assertEqual(job.id, body['id']) - self.assertEqual(job.args, body['args']) - us = from_utf8(us) - if sys.version_info < (2, 6): - self.assertEqual(next(keys(job.kwargs)), us) - self.assertIsInstance(next(keys(job.kwargs)), str) - - def test_from_message_empty_args(self): - body = {'task': self.mytask.name, 'id': uuid()} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - job = Request(m.decode(), message=m, app=self.app) - self.assertIsInstance(job, Request) - self.assertEqual(job.args, []) - self.assertEqual(job.kwargs, {}) - - def test_from_message_missing_required_fields(self): - body = {} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - with self.assertRaises(KeyError): - 
Request(m.decode(), message=m, app=self.app) - - def test_from_message_nonexistant_task(self): - body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(), - 'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}} - m = Message(None, body=anyjson.dumps(body), backend='foo', - content_type='application/json', - content_encoding='utf-8') - with self.assertRaises(KeyError): - Request(m.decode(), message=m, app=self.app) - - def test_execute(self): - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}}) - self.assertEqual(job.execute(), 256) - meta = self.mytask.backend.get_task_meta(tid) - self.assertEqual(meta['status'], states.SUCCESS) - self.assertEqual(meta['result'], 256) - - def test_execute_success_no_kwargs(self): - - @self.app.task # traverses coverage for decorator without parens - def mytask_no_kwargs(i): - return i ** i - - tid = uuid() - job = self.xRequest({ - 'task': mytask_no_kwargs.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertEqual(job.execute(), 256) - meta = mytask_no_kwargs.backend.get_task_meta(tid) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_success_some_kwargs(self): - scratch = {'task_id': None} - - @self.app.task(shared=False, accept_magic_kwargs=True) - def mytask_some_kwargs(i, task_id): - scratch['task_id'] = task_id - return i ** i - - tid = uuid() - job = self.xRequest({ - 'task': mytask_some_kwargs.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertEqual(job.execute(), 256) - meta = mytask_some_kwargs.backend.get_task_meta(tid) - self.assertEqual(scratch.get('task_id'), tid) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_ack(self): - scratch = {'ACK': False} - - def on_ack(*args, **kwargs): - scratch['ACK'] = True - - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack) - self.assertEqual(job.execute(), 256) - meta = self.mytask.backend.get_task_meta(tid) - self.assertTrue(scratch['ACK']) - self.assertEqual(meta['result'], 256) - self.assertEqual(meta['status'], states.SUCCESS) - - def test_execute_fail(self): - tid = uuid() - job = self.xRequest({ - 'task': self.mytask_raising.name, - 'id': tid, - 'args': [4], - 'kwargs': {}, - }) - self.assertIsInstance(job.execute(), ExceptionInfo) - meta = self.mytask_raising.backend.get_task_meta(tid) - self.assertEqual(meta['status'], states.FAILURE) - self.assertIsInstance(meta['result'], KeyError) - - def test_execute_using_pool(self): - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - - class MockPool(BasePool): - target = None - args = None - kwargs = None - - def __init__(self, *args, **kwargs): - pass - - def apply_async(self, target, args=None, kwargs=None, - *margs, **mkwargs): - self.target = target - self.args = args - self.kwargs = kwargs - - p = MockPool() - job.execute_using_pool(p) - self.assertTrue(p.target) - self.assertEqual(p.args[0], self.mytask.name) - self.assertEqual(p.args[1], tid) - self.assertEqual(p.args[2], [4]) - self.assertIn('f', p.args[3]) - self.assertIn([4], p.args) - - job.task.accept_magic_kwargs = False - job.execute_using_pool(p) - - def test_default_kwargs(self): - self.maxDiff = 3000 - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - self.assertDictEqual( - job.extend_with_default_kwargs(), { - 'f': 'x', - 'logfile': None, - 'loglevel': None, - 'task_id': job.id, - 'task_retries': 0, - 'task_is_eager': False, - 'delivery_info': { - 'exchange': None, - 
'routing_key': None, - 'priority': 0, - 'redelivered': False, - }, - 'task_name': job.name}) - - @patch('celery.worker.job.logger') - def _test_on_failure(self, exception, logger): - app = self.app - tid = uuid() - job = self.xRequest({'id': tid, 'args': [4]}) - try: - raise exception - except Exception: - exc_info = ExceptionInfo() - app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True - job.on_failure(exc_info) - self.assertTrue(logger.log.called) - context = logger.log.call_args[0][2] - self.assertEqual(self.mytask.name, context['name']) - self.assertIn(tid, context['id']) - - def test_on_failure(self): - self._test_on_failure(Exception('Inside unit tests')) - - def test_on_failure_unicode_exception(self): - self._test_on_failure(Exception('Бобры атакуют')) - - def test_on_failure_utf8_exception(self): - self._test_on_failure(Exception( - from_utf8('Бобры атакуют'))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py deleted file mode 100644 index 4d5ad02..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import absolute_import - -from celery.worker import state -from celery.tests.case import AppCase - - -class test_revoked(AppCase): - - def test_is_working(self): - state.revoked.add('foo') - self.assertIn('foo', state.revoked) - state.revoked.pop_value('foo') - self.assertNotIn('foo', state.revoked) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py deleted file mode 100644 index ede9a00..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py +++ /dev/null @@ -1,161 +0,0 @@ -from __future__ import absolute_import - -import pickle - -from time import time - -from celery.datastructures import LimitedSet -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.worker import state - -from celery.tests.case import AppCase, Mock, patch - - -class StateResetCase(AppCase): - - def setup(self): - self.reset_state() - - def teardown(self): - self.reset_state() - - def reset_state(self): - state.active_requests.clear() - state.revoked.clear() - state.total_count.clear() - - -class MockShelve(dict): - filename = None - in_sync = False - closed = False - - def open(self, filename, **kwargs): - self.filename = filename - return self - - def sync(self): - self.in_sync = True - - def close(self): - self.closed = True - - -class MyPersistent(state.Persistent): - storage = MockShelve() - - -class test_maybe_shutdown(AppCase): - - def teardown(self): - state.should_stop = False - state.should_terminate = False - - def test_should_stop(self): - state.should_stop = True - with self.assertRaises(WorkerShutdown): - state.maybe_shutdown() - - def test_should_terminate(self): - state.should_terminate = True - with self.assertRaises(WorkerTerminate): - state.maybe_shutdown() - - -class test_Persistent(StateResetCase): - - def setup(self): - self.reset_state() - self.p = MyPersistent(state, filename='celery-state') - - def test_close_twice(self): - self.p._is_open = False - self.p.close() - - def test_constructor(self): - self.assertDictEqual(self.p.db, {}) - self.assertEqual(self.p.db.filename, self.p.filename) - - def test_save(self): - self.p.db['foo'] = 'bar' - self.p.save() - self.assertTrue(self.p.db.in_sync) - self.assertTrue(self.p.db.closed) - - def 
add_revoked(self, *ids): - for id in ids: - self.p.db.setdefault('revoked', LimitedSet()).add(id) - - def test_merge(self, data=['foo', 'bar', 'baz']): - self.add_revoked(*data) - self.p.merge() - for item in data: - self.assertIn(item, state.revoked) - - def test_merge_dict(self): - self.p.clock = Mock() - self.p.clock.adjust.return_value = 626 - d = {'revoked': {'abc': time()}, 'clock': 313} - self.p._merge_with(d) - self.p.clock.adjust.assert_called_with(313) - self.assertEqual(d['clock'], 626) - self.assertIn('abc', state.revoked) - - def test_sync_clock_and_purge(self): - passthrough = Mock() - passthrough.side_effect = lambda x: x - with patch('celery.worker.state.revoked') as revoked: - d = {'clock': 0} - self.p.clock = Mock() - self.p.clock.forward.return_value = 627 - self.p._dumps = passthrough - self.p.compress = passthrough - self.p._sync_with(d) - revoked.purge.assert_called_with() - self.assertEqual(d['clock'], 627) - self.assertNotIn('revoked', d) - self.assertIs(d['zrevoked'], revoked) - - def test_sync(self, data1=['foo', 'bar', 'baz'], - data2=['baz', 'ini', 'koz']): - self.add_revoked(*data1) - for item in data2: - state.revoked.add(item) - self.p.sync() - - self.assertTrue(self.p.db['zrevoked']) - pickled = self.p.decompress(self.p.db['zrevoked']) - self.assertTrue(pickled) - saved = pickle.loads(pickled) - for item in data2: - self.assertIn(item, saved) - - -class SimpleReq(object): - - def __init__(self, name): - self.name = name - - -class test_state(StateResetCase): - - def test_accepted(self, requests=[SimpleReq('foo'), - SimpleReq('bar'), - SimpleReq('baz'), - SimpleReq('baz')]): - for request in requests: - state.task_accepted(request) - for req in requests: - self.assertIn(req, state.active_requests) - self.assertEqual(state.total_count['foo'], 1) - self.assertEqual(state.total_count['bar'], 1) - self.assertEqual(state.total_count['baz'], 2) - - def test_ready(self, requests=[SimpleReq('foo'), - SimpleReq('bar')]): - for request in requests: - state.task_accepted(request) - self.assertEqual(len(state.active_requests), 2) - for request in requests: - state.task_ready(request) - self.assertEqual(len(state.active_requests), 0) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py deleted file mode 100644 index 7edf78b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py +++ /dev/null @@ -1,139 +0,0 @@ -from __future__ import absolute_import - -from collections import defaultdict -from contextlib import contextmanager - -from kombu.utils.limits import TokenBucket - -from celery.worker import state -from celery.utils.timeutils import rate - -from celery.tests.case import AppCase, Mock, patch, body_from_sig - - -class test_default_strategy(AppCase): - - def setup(self): - @self.app.task(shared=False) - def add(x, y): - return x + y - - self.add = add - - class Context(object): - - def __init__(self, sig, s, reserved, consumer, message, body): - self.sig = sig - self.s = s - self.reserved = reserved - self.consumer = consumer - self.message = message - self.body = body - - def __call__(self, **kwargs): - return self.s( - self.message, self.body, - self.message.ack, self.message.reject, [], **kwargs - ) - - def was_reserved(self): - return self.reserved.called - - def was_rate_limited(self): - assert not self.was_reserved() - return self.consumer._limit_task.called - - def was_scheduled(self): - assert not 
self.was_reserved() - assert not self.was_rate_limited() - return self.consumer.timer.call_at.called - - def event_sent(self): - return self.consumer.event_dispatcher.send.call_args - - def get_request(self): - if self.was_reserved(): - return self.reserved.call_args[0][0] - if self.was_rate_limited(): - return self.consumer._limit_task.call_args[0][0] - if self.was_scheduled(): - return self.consumer.timer.call_at.call_args[0][0] - raise ValueError('request not handled') - - @contextmanager - def _context(self, sig, - rate_limits=True, events=True, utc=True, limit=None): - self.assertTrue(sig.type.Strategy) - - reserved = Mock() - consumer = Mock() - consumer.task_buckets = defaultdict(lambda: None) - if limit: - bucket = TokenBucket(rate(limit), capacity=1) - consumer.task_buckets[sig.task] = bucket - consumer.disable_rate_limits = not rate_limits - consumer.event_dispatcher.enabled = events - s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved) - self.assertTrue(s) - - message = Mock() - body = body_from_sig(self.app, sig, utc=utc) - - yield self.Context(sig, s, reserved, consumer, message, body) - - def test_when_logging_disabled(self): - with patch('celery.worker.strategy.logger') as logger: - logger.isEnabledFor.return_value = False - with self._context(self.add.s(2, 2)) as C: - C() - self.assertFalse(logger.info.called) - - def test_task_strategy(self): - with self._context(self.add.s(2, 2)) as C: - C() - self.assertTrue(C.was_reserved()) - req = C.get_request() - C.consumer.on_task_request.assert_called_with(req) - self.assertTrue(C.event_sent()) - - def test_when_events_disabled(self): - with self._context(self.add.s(2, 2), events=False) as C: - C() - self.assertTrue(C.was_reserved()) - self.assertFalse(C.event_sent()) - - def test_eta_task(self): - with self._context(self.add.s(2, 2).set(countdown=10)) as C: - C() - self.assertTrue(C.was_scheduled()) - C.consumer.qos.increment_eventually.assert_called_with() - - def test_eta_task_utc_disabled(self): - with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: - C() - self.assertTrue(C.was_scheduled()) - C.consumer.qos.increment_eventually.assert_called_with() - - def test_when_rate_limited(self): - task = self.add.s(2, 2) - with self._context(task, rate_limits=True, limit='1/m') as C: - C() - self.assertTrue(C.was_rate_limited()) - - def test_when_rate_limited__limits_disabled(self): - task = self.add.s(2, 2) - with self._context(task, rate_limits=False, limit='1/m') as C: - C() - self.assertTrue(C.was_reserved()) - - def test_when_revoked(self): - task = self.add.s(2, 2) - task.freeze() - state.revoked.add(task.id) - try: - with self._context(task) as C: - C() - with self.assertRaises(ValueError): - C.get_request() - finally: - state.revoked.discard(task.id) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py deleted file mode 100644 index b700a6c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py +++ /dev/null @@ -1,1128 +0,0 @@ -from __future__ import absolute_import, print_function - -import os -import socket - -from collections import deque -from datetime import datetime, timedelta -from threading import Event - -from amqp import ChannelError -from kombu import Connection -from kombu.common import QoS, ignore_errors -from kombu.transport.base import Message - -from celery.app.defaults import DEFAULTS -from celery.bootsteps import RUN, CLOSE, 
StartStopStep -from celery.concurrency.base import BasePool -from celery.datastructures import AttributeDict -from celery.exceptions import ( - WorkerShutdown, WorkerTerminate, TaskRevokedError, -) -from celery.five import Empty, range, Queue as FastQueue -from celery.utils import uuid -from celery.worker import components -from celery.worker import consumer -from celery.worker.consumer import Consumer as __Consumer -from celery.worker.job import Request -from celery.utils import worker_direct -from celery.utils.serialization import pickle -from celery.utils.timer2 import Timer - -from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging - - -def MockStep(step=None): - step = Mock() if step is None else step - step.blueprint = Mock() - step.blueprint.name = 'MockNS' - step.name = 'MockStep(%s)' % (id(step), ) - return step - - -def mock_event_dispatcher(): - evd = Mock(name='event_dispatcher') - evd.groups = ['worker'] - evd._outbound_buffer = deque() - return evd - - -class PlaceHolder(object): - pass - - -def find_step(obj, typ): - return obj.blueprint.steps[typ.name] - - -class Consumer(__Consumer): - - def __init__(self, *args, **kwargs): - kwargs.setdefault('without_mingle', True) # disable Mingle step - kwargs.setdefault('without_gossip', True) # disable Gossip step - kwargs.setdefault('without_heartbeat', True) # disable Heart step - super(Consumer, self).__init__(*args, **kwargs) - - -class _MyKombuConsumer(Consumer): - broadcast_consumer = Mock() - task_consumer = Mock() - - def __init__(self, *args, **kwargs): - kwargs.setdefault('pool', BasePool(2)) - super(_MyKombuConsumer, self).__init__(*args, **kwargs) - - def restart_heartbeat(self): - self.heart = None - - -class MyKombuConsumer(Consumer): - - def loop(self, *args, **kwargs): - pass - - -class MockNode(object): - commands = [] - - def handle_message(self, body, message): - self.commands.append(body.pop('command', None)) - - -class MockEventDispatcher(object): - sent = [] - closed = False - flushed = False - _outbound_buffer = [] - - def send(self, event, *args, **kwargs): - self.sent.append(event) - - def close(self): - self.closed = True - - def flush(self): - self.flushed = True - - -class MockHeart(object): - closed = False - - def stop(self): - self.closed = True - - -def create_message(channel, **data): - data.setdefault('id', uuid()) - channel.no_ack_consumers = set() - m = Message(channel, body=pickle.dumps(dict(**data)), - content_type='application/x-python-serialize', - content_encoding='binary', - delivery_info={'consumer_tag': 'mock'}) - m.accept = ['application/x-python-serialize'] - return m - - -class test_Consumer(AppCase): - - def setup(self): - self.buffer = FastQueue() - self.timer = Timer() - - @self.app.task(shared=False) - def foo_task(x, y, z): - return x * y * z - self.foo_task = foo_task - - def teardown(self): - self.timer.stop() - - def test_info(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - l.connection = Mock() - l.connection.info.return_value = {'foo': 'bar'} - l.controller = l.app.WorkController() - l.controller.pool = Mock() - l.controller.pool.info.return_value = [Mock(), Mock()] - l.controller.consumer = l - info = l.controller.stats() - self.assertEqual(info['prefetch_count'], 10) - self.assertTrue(info['broker']) - - def test_start_when_closed(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = CLOSE - l.start() - - def 
test_connection(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - - l.blueprint.start(l) - self.assertIsInstance(l.connection, Connection) - - l.blueprint.state = RUN - l.event_dispatcher = None - l.blueprint.restart(l) - self.assertTrue(l.connection) - - l.blueprint.state = RUN - l.shutdown() - self.assertIsNone(l.connection) - self.assertIsNone(l.task_consumer) - - l.blueprint.start(l) - self.assertIsInstance(l.connection, Connection) - l.blueprint.restart(l) - - l.stop() - l.shutdown() - self.assertIsNone(l.connection) - self.assertIsNone(l.task_consumer) - - def test_close_connection(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - step = find_step(l, consumer.Connection) - conn = l.connection = Mock() - step.shutdown(l) - self.assertTrue(conn.close.called) - self.assertIsNone(l.connection) - - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - eventer = l.event_dispatcher = mock_event_dispatcher() - eventer.enabled = True - heart = l.heart = MockHeart() - l.blueprint.state = RUN - Events = find_step(l, consumer.Events) - Events.shutdown(l) - Heart = find_step(l, consumer.Heart) - Heart.shutdown(l) - self.assertTrue(eventer.close.call_count) - self.assertTrue(heart.closed) - - @patch('celery.worker.consumer.warn') - def test_receive_message_unknown(self, warn): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - m = create_message(backend, unknown={'baz': '!!!'}) - l.event_dispatcher = mock_event_dispatcher() - l.node = MockNode() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertTrue(warn.call_count) - - @patch('celery.worker.strategy.to_timestamp') - def test_receive_message_eta_OverflowError(self, to_timestamp): - to_timestamp.side_effect = OverflowError() - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - m = create_message(Mock(), task=self.foo_task.name, - args=('2, 2'), - kwargs={}, - eta=datetime.now().isoformat()) - l.event_dispatcher = mock_event_dispatcher() - l.node = MockNode() - l.update_strategies() - l.qos = Mock() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertTrue(m.acknowledged) - - @patch('celery.worker.consumer.error') - def test_receive_message_InvalidTaskError(self, error): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.event_dispatcher = mock_event_dispatcher() - l.steps.pop() - m = create_message(Mock(), task=self.foo_task.name, - args=(1, 2), kwargs='foobarbaz', id=1) - l.update_strategies() - l.event_dispatcher = mock_event_dispatcher() - - callback = self._get_on_message(l) - callback(m.decode(), m) - self.assertIn('Received invalid task message', error.call_args[0][0]) - - @patch('celery.worker.consumer.crit') - def test_on_decode_error(self, crit): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - - class MockMessage(Mock): - content_type = 'application/x-msgpack' - content_encoding = 'binary' - body = 'foobarbaz' - - message = MockMessage() - l.on_decode_error(message, KeyError('foo')) - self.assertTrue(message.ack.call_count) - self.assertIn("Can't decode message body", crit.call_args[0][0]) - - def _get_on_message(self, l): - if l.qos is None: - l.qos = Mock() - l.event_dispatcher = mock_event_dispatcher() - l.task_consumer = Mock() - l.connection = Mock() - 
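# _get_on_message(), assembled just below, drives the consumer loop against a
# mocked connection until WorkerShutdown fires, then recovers whatever
# callback was registered on the task consumer. A standalone sketch of that
# capture pattern, using nothing but unittest.mock:
from unittest.mock import Mock

task_consumer = Mock()
# ... imagine the consumer loop registering its on-message handler here:
task_consumer.register_callback(lambda body, message: print(body))
# the test can then pull the handler back out of the mock and drive it:
callback = task_consumer.register_callback.call_args[0][0]
callback({'task': 'demo'}, Mock())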
l.connection.drain_events.side_effect = WorkerShutdown() - - with self.assertRaises(WorkerShutdown): - l.loop(*l.loop_args()) - self.assertTrue(l.task_consumer.register_callback.called) - return l.task_consumer.register_callback.call_args[0][0] - - def test_receieve_message(self): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.event_dispatcher = mock_event_dispatcher() - m = create_message(Mock(), task=self.foo_task.name, - args=[2, 4, 8], kwargs={}) - l.update_strategies() - callback = self._get_on_message(l) - callback(m.decode(), m) - - in_bucket = self.buffer.get_nowait() - self.assertIsInstance(in_bucket, Request) - self.assertEqual(in_bucket.name, self.foo_task.name) - self.assertEqual(in_bucket.execute(), 2 * 4 * 8) - self.assertTrue(self.timer.empty()) - - def test_start_channel_error(self): - - class MockConsumer(Consumer): - iterations = 0 - - def loop(self, *args, **kwargs): - if not self.iterations: - self.iterations = 1 - raise KeyError('foo') - raise SyntaxError('bar') - - l = MockConsumer(self.buffer.put, timer=self.timer, - send_events=False, pool=BasePool(), app=self.app) - l.channel_errors = (KeyError, ) - with self.assertRaises(KeyError): - l.start() - l.timer.stop() - - def test_start_connection_error(self): - - class MockConsumer(Consumer): - iterations = 0 - - def loop(self, *args, **kwargs): - if not self.iterations: - self.iterations = 1 - raise KeyError('foo') - raise SyntaxError('bar') - - l = MockConsumer(self.buffer.put, timer=self.timer, - send_events=False, pool=BasePool(), app=self.app) - - l.connection_errors = (KeyError, ) - self.assertRaises(SyntaxError, l.start) - l.timer.stop() - - def test_loop_ignores_socket_timeout(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - raise socket.timeout(10) - - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.connection = Connection() - l.task_consumer = Mock() - l.connection.obj = l - l.qos = QoS(l.task_consumer.qos, 10) - l.loop(*l.loop_args()) - - def test_loop_when_socket_error(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - raise socket.error('foo') - - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - c = l.connection = Connection() - l.connection.obj = l - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - with self.assertRaises(socket.error): - l.loop(*l.loop_args()) - - l.blueprint.state = CLOSE - l.connection = c - l.loop(*l.loop_args()) - - def test_loop(self): - - class Connection(self.app.connection().__class__): - obj = None - - def drain_events(self, **kwargs): - self.obj.connection = None - - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.connection = Connection() - l.connection.obj = l - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 10) - - l.loop(*l.loop_args()) - l.loop(*l.loop_args()) - self.assertTrue(l.task_consumer.consume.call_count) - l.task_consumer.qos.assert_called_with(prefetch_count=10) - self.assertEqual(l.qos.value, 10) - l.qos.decrement_eventually() - self.assertEqual(l.qos.value, 9) - l.qos.update() - self.assertEqual(l.qos.value, 9) - l.task_consumer.qos.assert_called_with(prefetch_count=9) - - def test_ignore_errors(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.connection_errors = 
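# An aside on the prefetch bookkeeping test_loop above asserts on:
# decrement_eventually() only lowers the desired value, and the new count is
# pushed to the broker the next time update() runs. A minimal sketch with
# kombu's QoS, which calls back with prefetch_count= exactly as the tests
# expect:
from kombu.common import QoS

sent = []
qos = QoS(lambda prefetch_count: sent.append(prefetch_count), 10)
qos.update()                # broker told prefetch_count=10
qos.decrement_eventually()  # desired value drops to 9, nothing sent yet
qos.update()                # ...flushed now
assert qos.value == 9 and sent == [10, 9]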
(AttributeError, KeyError, ) - l.channel_errors = (SyntaxError, ) - ignore_errors(l, Mock(side_effect=AttributeError('foo'))) - ignore_errors(l, Mock(side_effect=KeyError('foo'))) - ignore_errors(l, Mock(side_effect=SyntaxError('foo'))) - with self.assertRaises(IndexError): - ignore_errors(l, Mock(side_effect=IndexError('foo'))) - - def test_apply_eta_task(self): - from celery.worker import state - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.qos = QoS(None, 10) - - task = object() - qos = l.qos.value - l.apply_eta_task(task) - self.assertIn(task, state.reserved_requests) - self.assertEqual(l.qos.value, qos - 1) - self.assertIs(self.buffer.get_nowait(), task) - - def test_receieve_message_eta_isoformat(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - m = create_message( - Mock(), task=self.foo_task.name, - eta=(datetime.now() + timedelta(days=1)).isoformat(), - args=[2, 4, 8], kwargs={}, - ) - - l.task_consumer = Mock() - l.qos = QoS(l.task_consumer.qos, 1) - current_pcount = l.qos.value - l.event_dispatcher = mock_event_dispatcher() - l.enabled = False - l.update_strategies() - callback = self._get_on_message(l) - callback(m.decode(), m) - l.timer.stop() - l.timer.join(1) - - items = [entry[2] for entry in self.timer.queue] - found = 0 - for item in items: - if item.args[0].name == self.foo_task.name: - found = True - self.assertTrue(found) - self.assertGreater(l.qos.value, current_pcount) - l.timer.stop() - - def test_pidbox_callback(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - con = find_step(l, consumer.Control).box - con.node = Mock() - con.reset = Mock() - - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - - con.node = Mock() - con.node.handle_message.side_effect = KeyError('foo') - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - - con.node = Mock() - con.node.handle_message.side_effect = ValueError('foo') - con.on_message('foo', 'bar') - con.node.handle_message.assert_called_with('foo', 'bar') - self.assertTrue(con.reset.called) - - def test_revoke(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - id = uuid() - t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8], - kwargs={}, id=id) - from celery.worker.state import revoked - revoked.add(id) - - callback = self._get_on_message(l) - callback(t.decode(), t) - self.assertTrue(self.buffer.empty()) - - def test_receieve_message_not_registered(self): - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - l.steps.pop() - backend = Mock() - m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={}) - - l.event_dispatcher = mock_event_dispatcher() - callback = self._get_on_message(l) - self.assertFalse(callback(m.decode(), m)) - with self.assertRaises(Empty): - self.buffer.get_nowait() - self.assertTrue(self.timer.empty()) - - @patch('celery.worker.consumer.warn') - @patch('celery.worker.consumer.logger') - def test_receieve_message_ack_raises(self, logger, warn): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.blueprint.state = RUN - backend = Mock() - m = create_message(backend, args=[2, 4, 8], kwargs={}) - - l.event_dispatcher = mock_event_dispatcher() - l.connection_errors = (socket.error, ) - m.reject = Mock() - m.reject.side_effect = 
socket.error('foo') - callback = self._get_on_message(l) - self.assertFalse(callback(m.decode(), m)) - self.assertTrue(warn.call_count) - with self.assertRaises(Empty): - self.buffer.get_nowait() - self.assertTrue(self.timer.empty()) - m.reject.assert_called_with(requeue=False) - self.assertTrue(logger.critical.call_count) - - def test_receive_message_eta(self): - import sys - from functools import partial - if os.environ.get('C_DEBUG_TEST'): - pp = partial(print, file=sys.__stderr__) - else: - def pp(*args, **kwargs): - pass - pp('TEST RECEIVE MESSAGE ETA') - pp('+CREATE MYKOMBUCONSUMER') - l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - pp('-CREATE MYKOMBUCONSUMER') - l.steps.pop() - l.event_dispatcher = mock_event_dispatcher() - backend = Mock() - pp('+ CREATE MESSAGE') - m = create_message( - backend, task=self.foo_task.name, - args=[2, 4, 8], kwargs={}, - eta=(datetime.now() + timedelta(days=1)).isoformat(), - ) - pp('- CREATE MESSAGE') - - try: - pp('+ BLUEPRINT START 1') - l.blueprint.start(l) - pp('- BLUEPRINT START 1') - p = l.app.conf.BROKER_CONNECTION_RETRY - l.app.conf.BROKER_CONNECTION_RETRY = False - pp('+ BLUEPRINT START 2') - l.blueprint.start(l) - pp('- BLUEPRINT START 2') - l.app.conf.BROKER_CONNECTION_RETRY = p - pp('+ BLUEPRINT RESTART') - l.blueprint.restart(l) - pp('- BLUEPRINT RESTART') - l.event_dispatcher = mock_event_dispatcher() - pp('+ GET ON MESSAGE') - callback = self._get_on_message(l) - pp('- GET ON MESSAGE') - pp('+ CALLBACK') - callback(m.decode(), m) - pp('- CALLBACK') - finally: - pp('+ STOP TIMER') - l.timer.stop() - pp('- STOP TIMER') - try: - pp('+ JOIN TIMER') - l.timer.join() - pp('- JOIN TIMER') - except RuntimeError: - pass - - in_hold = l.timer.queue[0] - self.assertEqual(len(in_hold), 3) - eta, priority, entry = in_hold - task = entry.args[0] - self.assertIsInstance(task, Request) - self.assertEqual(task.name, self.foo_task.name) - self.assertEqual(task.execute(), 2 * 4 * 8) - with self.assertRaises(Empty): - self.buffer.get_nowait() - - def test_reset_pidbox_node(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - con = find_step(l, consumer.Control).box - con.node = Mock() - chan = con.node.channel = Mock() - l.connection = Mock() - chan.close.side_effect = socket.error('foo') - l.connection_errors = (socket.error, ) - con.reset() - chan.close.assert_called_with() - - def test_reset_pidbox_node_green(self): - from celery.worker.pidbox import gPidbox - pool = Mock() - pool.is_green = True - l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, - app=self.app) - con = find_step(l, consumer.Control) - self.assertIsInstance(con.box, gPidbox) - con.start(l) - l.pool.spawn_n.assert_called_with( - con.box.loop, l, - ) - - def test__green_pidbox_node(self): - pool = Mock() - pool.is_green = True - l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, - app=self.app) - l.node = Mock() - controller = find_step(l, consumer.Control) - - class BConsumer(Mock): - - def __enter__(self): - self.consume() - return self - - def __exit__(self, *exc_info): - self.cancel() - - controller.box.node.listen = BConsumer() - connections = [] - - class Connection(object): - calls = 0 - - def __init__(self, obj): - connections.append(self) - self.obj = obj - self.default_channel = self.channel() - self.closed = False - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - self.close() - - def channel(self): - return Mock() - - def as_uri(self): - return 'dummy://' - - def 
drain_events(self, **kwargs): - if not self.calls: - self.calls += 1 - raise socket.timeout() - self.obj.connection = None - controller.box._node_shutdown.set() - - def close(self): - self.closed = True - - l.connection = Mock() - l.connect = lambda: Connection(obj=l) - controller = find_step(l, consumer.Control) - controller.box.loop(l) - - self.assertTrue(controller.box.node.listen.called) - self.assertTrue(controller.box.consumer) - controller.box.consumer.consume.assert_called_with() - - self.assertIsNone(l.connection) - self.assertTrue(connections[0].closed) - - @patch('kombu.connection.Connection._establish_connection') - @patch('kombu.utils.sleep') - def test_connect_errback(self, sleep, connect): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - from kombu.transport.memory import Transport - Transport.connection_errors = (ChannelError, ) - - def effect(): - if connect.call_count > 1: - return - raise ChannelError('error') - connect.side_effect = effect - l.connect() - connect.assert_called_with() - - def test_stop_pidbox_node(self): - l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - cont = find_step(l, consumer.Control) - cont._node_stopped = Event() - cont._node_shutdown = Event() - cont._node_stopped.set() - cont.stop(l) - - def test_start__loop(self): - - class _QoS(object): - prev = 3 - value = 4 - - def update(self): - self.prev = self.value - - class _Consumer(MyKombuConsumer): - iterations = 0 - - def reset_connection(self): - if self.iterations >= 1: - raise KeyError('foo') - - init_callback = Mock() - l = _Consumer(self.buffer.put, timer=self.timer, - init_callback=init_callback, app=self.app) - l.task_consumer = Mock() - l.broadcast_consumer = Mock() - l.qos = _QoS() - l.connection = Connection() - l.iterations = 0 - - def raises_KeyError(*args, **kwargs): - l.iterations += 1 - if l.qos.prev != l.qos.value: - l.qos.update() - if l.iterations >= 2: - raise KeyError('foo') - - l.loop = raises_KeyError - with self.assertRaises(KeyError): - l.start() - self.assertEqual(l.iterations, 2) - self.assertEqual(l.qos.prev, l.qos.value) - - init_callback.reset_mock() - l = _Consumer(self.buffer.put, timer=self.timer, app=self.app, - send_events=False, init_callback=init_callback) - l.qos = _QoS() - l.task_consumer = Mock() - l.broadcast_consumer = Mock() - l.connection = Connection() - l.loop = Mock(side_effect=socket.error('foo')) - with self.assertRaises(socket.error): - l.start() - self.assertTrue(l.loop.call_count) - - def test_reset_connection_with_no_node(self): - l = Consumer(self.buffer.put, timer=self.timer, app=self.app) - l.steps.pop() - self.assertEqual(None, l.pool) - l.blueprint.start(l) - - -class test_WorkController(AppCase): - - def setup(self): - self.worker = self.create_worker() - from celery import worker - self._logger = worker.logger - self._comp_logger = components.logger - self.logger = worker.logger = Mock() - self.comp_logger = components.logger = Mock() - - @self.app.task(shared=False) - def foo_task(x, y, z): - return x * y * z - self.foo_task = foo_task - - def teardown(self): - from celery import worker - worker.logger = self._logger - components.logger = self._comp_logger - - def create_worker(self, **kw): - worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) - worker.blueprint.shutdown_complete.set() - return worker - - def test_on_consumer_ready(self): - self.worker.on_consumer_ready(Mock()) - - def test_setup_queues_worker_direct(self): - self.app.conf.CELERY_WORKER_DIRECT = True - 
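# worker_direct(), used in the next assertion and defined in
# celery/utils/__init__.py further down this patch, maps a node name to its
# direct-routing queue on the shared 'C.dq' exchange:
from celery.utils import worker_direct

q = worker_direct('w1@example.com')
print(q.name)           # 'w1@example.com.dq'
print(q.exchange.name)  # 'C.dq'
print(q.routing_key)    # 'w1@example.com'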
self.app.amqp.__dict__['queues'] = Mock() - self.worker.setup_queues({}) - self.app.amqp.queues.select_add.assert_called_with( - worker_direct(self.worker.hostname), - ) - - def test_send_worker_shutdown(self): - with patch('celery.signals.worker_shutdown') as ws: - self.worker._send_worker_shutdown() - ws.send.assert_called_with(sender=self.worker) - - def test_process_shutdown_on_worker_shutdown(self): - raise SkipTest('unstable test') - from celery.concurrency.prefork import process_destructor - from celery.concurrency.asynpool import Worker - with patch('celery.signals.worker_process_shutdown') as ws: - Worker._make_shortcuts = Mock() - with patch('os._exit') as _exit: - worker = Worker(None, None, on_exit=process_destructor) - worker._do_exit(22, 3.1415926) - ws.send.assert_called_with( - sender=None, pid=22, exitcode=3.1415926, - ) - _exit.assert_called_with(3.1415926) - - def test_process_task_revoked_release_semaphore(self): - self.worker._quick_release = Mock() - req = Mock() - req.execute_using_pool.side_effect = TaskRevokedError - self.worker._process_task(req) - self.worker._quick_release.assert_called_with() - - delattr(self.worker, '_quick_release') - self.worker._process_task(req) - - def test_shutdown_no_blueprint(self): - self.worker.blueprint = None - self.worker._shutdown() - - @patch('celery.platforms.create_pidlock') - def test_use_pidfile(self, create_pidlock): - create_pidlock.return_value = Mock() - worker = self.create_worker(pidfile='pidfilelockfilepid') - worker.steps = [] - worker.start() - self.assertTrue(create_pidlock.called) - worker.stop() - self.assertTrue(worker.pidlock.release.called) - - @patch('celery.platforms.signals') - @patch('celery.platforms.set_mp_process_title') - def test_process_initializer(self, set_mp_process_title, _signals): - with restore_logging(): - from celery import signals - from celery._state import _tls - from celery.concurrency.prefork import ( - process_initializer, WORKER_SIGRESET, WORKER_SIGIGNORE, - ) - - def on_worker_process_init(**kwargs): - on_worker_process_init.called = True - on_worker_process_init.called = False - signals.worker_process_init.connect(on_worker_process_init) - - def Loader(*args, **kwargs): - loader = Mock(*args, **kwargs) - loader.conf = {} - loader.override_backends = {} - return loader - - with self.Celery(loader=Loader) as app: - app.conf = AttributeDict(DEFAULTS) - process_initializer(app, 'awesome.worker.com') - _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) - _signals.reset.assert_any_call(*WORKER_SIGRESET) - self.assertTrue(app.loader.init_worker.call_count) - self.assertTrue(on_worker_process_init.called) - self.assertIs(_tls.current_app, app) - set_mp_process_title.assert_called_with( - 'celeryd', hostname='awesome.worker.com', - ) - - with patch('celery.app.trace.setup_worker_optimizations') as S: - os.environ['FORKED_BY_MULTIPROCESSING'] = "1" - try: - process_initializer(app, 'luke.worker.com') - S.assert_called_with(app) - finally: - os.environ.pop('FORKED_BY_MULTIPROCESSING', None) - - def test_attrs(self): - worker = self.worker - self.assertIsNotNone(worker.timer) - self.assertIsInstance(worker.timer, Timer) - self.assertIsNotNone(worker.pool) - self.assertIsNotNone(worker.consumer) - self.assertTrue(worker.steps) - - def test_with_embedded_beat(self): - worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) - self.assertTrue(worker.beat) - self.assertIn(worker.beat, [w.obj for w in worker.steps]) - - def test_with_autoscaler(self): - worker = self.create_worker( - 
autoscale=[10, 3], send_events=False, - timer_cls='celery.utils.timer2.Timer', - ) - self.assertTrue(worker.autoscaler) - - def test_dont_stop_or_terminate(self): - worker = self.app.WorkController(concurrency=1, loglevel=0) - worker.stop() - self.assertNotEqual(worker.blueprint.state, CLOSE) - worker.terminate() - self.assertNotEqual(worker.blueprint.state, CLOSE) - - sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False - try: - worker.blueprint.state = RUN - worker.stop(in_sighandler=True) - self.assertNotEqual(worker.blueprint.state, CLOSE) - worker.terminate(in_sighandler=True) - self.assertNotEqual(worker.blueprint.state, CLOSE) - finally: - worker.pool.signal_safe = sigsafe - - def test_on_timer_error(self): - worker = self.app.WorkController(concurrency=1, loglevel=0) - - try: - raise KeyError('foo') - except KeyError as exc: - components.Timer(worker).on_timer_error(exc) - msg, args = self.comp_logger.error.call_args[0] - self.assertIn('KeyError', msg % args) - - def test_on_timer_tick(self): - worker = self.app.WorkController(concurrency=1, loglevel=10) - - components.Timer(worker).on_timer_tick(30.0) - xargs = self.comp_logger.debug.call_args[0] - fmt, arg = xargs[0], xargs[1] - self.assertEqual(30.0, arg) - self.assertIn('Next eta %s secs', fmt) - - def test_process_task(self): - worker = self.worker - worker.pool = Mock() - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker._process_task(task) - self.assertEqual(worker.pool.apply_async.call_count, 1) - worker.pool.stop() - - def test_process_task_raise_base(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C') - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker.steps = [] - worker.blueprint.state = RUN - with self.assertRaises(KeyboardInterrupt): - worker._process_task(task) - - def test_process_task_raise_WorkerTerminate(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = WorkerTerminate() - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker.steps = [] - worker.blueprint.state = RUN - with self.assertRaises(SystemExit): - worker._process_task(task) - - def test_process_task_raise_regular(self): - worker = self.worker - worker.pool = Mock() - worker.pool.apply_async.side_effect = KeyError('some exception') - backend = Mock() - m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], - kwargs={}) - task = Request(m.decode(), message=m, app=self.app) - worker._process_task(task) - worker.pool.stop() - - def test_start_catches_base_exceptions(self): - worker1 = self.create_worker() - worker1.blueprint.state = RUN - stc = MockStep() - stc.start.side_effect = WorkerTerminate() - worker1.steps = [stc] - worker1.start() - stc.start.assert_called_with(worker1) - self.assertTrue(stc.terminate.call_count) - - worker2 = self.create_worker() - worker2.blueprint.state = RUN - sec = MockStep() - sec.start.side_effect = WorkerShutdown() - sec.terminate = None - worker2.steps = [sec] - worker2.start() - self.assertTrue(sec.stop.call_count) - - def test_state_db(self): - from celery.worker import state - Persistent = state.Persistent - - state.Persistent = Mock() - try: - 
worker = self.create_worker(state_db='statefilename') - self.assertTrue(worker._persistence) - finally: - state.Persistent = Persistent - - def test_process_task_sem(self): - worker = self.worker - worker._quick_acquire = Mock() - - req = Mock() - worker._process_task_sem(req) - worker._quick_acquire.assert_called_with(worker._process_task, req) - - def test_signal_consumer_close(self): - worker = self.worker - worker.consumer = Mock() - - worker.signal_consumer_close() - worker.consumer.close.assert_called_with() - - worker.consumer.close.side_effect = AttributeError() - worker.signal_consumer_close() - - def test_start__stop(self): - worker = self.worker - worker.blueprint.shutdown_complete.set() - worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)] - worker.blueprint.state = RUN - worker.blueprint.started = 4 - for w in worker.steps: - w.start = Mock() - w.close = Mock() - w.stop = Mock() - - worker.start() - for w in worker.steps: - self.assertTrue(w.start.call_count) - worker.consumer = Mock() - worker.stop() - for stopstep in worker.steps: - self.assertTrue(stopstep.close.call_count) - self.assertTrue(stopstep.stop.call_count) - - # Doesn't close pool if no pool. - worker.start() - worker.pool = None - worker.stop() - - # test that stop of None is not attempted - worker.steps[-1] = None - worker.start() - worker.stop() - - def test_step_raises(self): - worker = self.worker - step = Mock() - worker.steps = [step] - step.start.side_effect = TypeError() - worker.stop = Mock() - worker.start() - worker.stop.assert_called_with() - - def test_state(self): - self.assertTrue(self.worker.state) - - def test_start__terminate(self): - worker = self.worker - worker.blueprint.shutdown_complete.set() - worker.blueprint.started = 5 - worker.blueprint.state = RUN - worker.steps = [MockStep() for _ in range(5)] - worker.start() - for w in worker.steps[:3]: - self.assertTrue(w.start.call_count) - self.assertTrue(worker.blueprint.started, len(worker.steps)) - self.assertEqual(worker.blueprint.state, RUN) - worker.terminate() - for step in worker.steps: - self.assertTrue(step.terminate.call_count) - - def test_Queues_pool_no_sem(self): - w = Mock() - w.pool_cls.uses_semaphore = False - components.Queues(w).create(w) - self.assertIs(w.process_task, w._process_task) - - def test_Hub_crate(self): - w = Mock() - x = components.Hub(w) - x.create(w) - self.assertTrue(w.timer.max_interval) - - def test_Pool_crate_threaded(self): - w = Mock() - w._conninfo.connection_errors = w._conninfo.channel_errors = () - w.pool_cls = Mock() - w.use_eventloop = False - pool = components.Pool(w) - pool.create(w) - - def test_Pool_create(self): - from kombu.async.semaphore import LaxBoundedSemaphore - w = Mock() - w._conninfo.connection_errors = w._conninfo.channel_errors = () - w.hub = Mock() - - PoolImp = Mock() - poolimp = PoolImp.return_value = Mock() - poolimp._pool = [Mock(), Mock()] - poolimp._cache = {} - poolimp._fileno_to_inq = {} - poolimp._fileno_to_outq = {} - - from celery.concurrency.prefork import TaskPool as _TaskPool - - class MockTaskPool(_TaskPool): - Pool = PoolImp - - @property - def timers(self): - return {Mock(): 30} - - w.pool_cls = MockTaskPool - w.use_eventloop = True - w.consumer.restart_count = -1 - pool = components.Pool(w) - pool.create(w) - pool.register_with_event_loop(w, w.hub) - self.assertIsInstance(w.semaphore, LaxBoundedSemaphore) - P = w.pool - P.start() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py 
b/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py deleted file mode 100644 index 20e11f0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py +++ /dev/null @@ -1,407 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils - ~~~~~~~~~~~~ - - Utility functions. - -""" -from __future__ import absolute_import, print_function - -import numbers -import os -import re -import socket -import sys -import traceback -import warnings -import datetime - -from collections import Callable -from functools import partial, wraps -from inspect import getargspec -from pprint import pprint - -from kombu.entity import Exchange, Queue - -from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning -from celery.five import WhateverIO, items, reraise, string_t - -__all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge', - 'is_iterable', 'isatty', 'cry', 'maybe_reraise', 'strtobool', - 'jsonify', 'gen_task_name', 'nodename', 'nodesplit', - 'cached_property'] - -PY3 = sys.version_info[0] == 3 - - -PENDING_DEPRECATION_FMT = """ - {description} is scheduled for deprecation in \ - version {deprecation} and removal in version v{removal}. \ - {alternative} -""" - -DEPRECATION_FMT = """ - {description} is deprecated and scheduled for removal in - version {removal}. {alternative} -""" - -UNKNOWN_SIMPLE_FORMAT_KEY = """ -Unknown format %{0} in string {1!r}. -Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'), -or did you escape and the value was expanded twice? (%%N -> %N -> %hostname)? -""".strip() - -#: Billiard sets this when execv is enabled. -#: We use it to find out the name of the original ``__main__`` -#: module, so that we can properly rewrite the name of the -#: task to be that of ``App.main``. -MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None - -#: Exchange for worker direct queues. -WORKER_DIRECT_EXCHANGE = Exchange('C.dq') - -#: Format for worker direct queue names. -WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq' - -#: Separator for worker node name and hostname. -NODENAME_SEP = '@' - -NODENAME_DEFAULT = 'celery' -RE_FORMAT = re.compile(r'%(\w)') - - -def worker_direct(hostname): - """Return :class:`kombu.Queue` that is a direct route to - a worker by hostname. - - :param hostname: The fully qualified node name of a worker - (e.g. ``w1@example.com``). If passed a - :class:`kombu.Queue` instance it will simply return - that instead. - """ - if isinstance(hostname, Queue): - return hostname - return Queue(WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), - WORKER_DIRECT_EXCHANGE, - hostname, auto_delete=True) - - -def warn_deprecated(description=None, deprecation=None, - removal=None, alternative=None, stacklevel=2): - ctx = {'description': description, - 'deprecation': deprecation, 'removal': removal, - 'alternative': alternative} - if deprecation is not None: - w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) - else: - w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) - warnings.warn(w, stacklevel=stacklevel) - - -def deprecated(deprecation=None, removal=None, - alternative=None, description=None): - """Decorator for deprecated functions. - - A deprecation warning will be emitted when the function is called. - - :keyword deprecation: Version that marks first deprecation, if this - argument is not set a ``PendingDeprecationWarning`` will be emitted - instead. - :keyword removal: Future version when this feature will be removed. 
- :keyword alternative: Instructions for an alternative solution (if any). - :keyword description: Description of what is being deprecated. - - """ - def _inner(fun): - - @wraps(fun) - def __inner(*args, **kwargs): - from .imports import qualname - warn_deprecated(description=description or qualname(fun), - deprecation=deprecation, - removal=removal, - alternative=alternative, - stacklevel=3) - return fun(*args, **kwargs) - return __inner - return _inner - - -def deprecated_property(deprecation=None, removal=None, - alternative=None, description=None): - def _inner(fun): - return _deprecated_property( - fun, deprecation=deprecation, removal=removal, - alternative=alternative, description=description or fun.__name__) - return _inner - - -class _deprecated_property(object): - - def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo): - self.__get = fget - self.__set = fset - self.__del = fdel - self.__name__, self.__module__, self.__doc__ = ( - fget.__name__, fget.__module__, fget.__doc__, - ) - self.depreinfo = depreinfo - self.depreinfo.setdefault('stacklevel', 3) - - def __get__(self, obj, type=None): - if obj is None: - return self - warn_deprecated(**self.depreinfo) - return self.__get(obj) - - def __set__(self, obj, value): - if obj is None: - return self - if self.__set is None: - raise AttributeError('cannot set attribute') - warn_deprecated(**self.depreinfo) - self.__set(obj, value) - - def __delete__(self, obj): - if obj is None: - return self - if self.__del is None: - raise AttributeError('cannot delete attribute') - warn_deprecated(**self.depreinfo) - self.__del(obj) - - def setter(self, fset): - return self.__class__(self.__get, fset, self.__del, **self.depreinfo) - - def deleter(self, fdel): - return self.__class__(self.__get, self.__set, fdel, **self.depreinfo) - - -def lpmerge(L, R): - """In place left precedent dictionary merge. 
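# A worked example of the merge described here: every non-None value in R is
# written into L in place, so a None in R can never clobber a value in L.
# A standalone copy of the logic, for illustration only:
def lpmerge_sketch(L, R):
    for k, v in R.items():
        if v is not None:
            L[k] = v
    return L

print(lpmerge_sketch({'broker': 'amqp://', 'backend': 'redis://'},
                     {'backend': None, 'timezone': 'UTC'}))
# -> {'broker': 'amqp://', 'backend': 'redis://', 'timezone': 'UTC'}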
- - Keeps values from `L`, if the value in `R` is :const:`None`.""" - set = L.__setitem__ - [set(k, v) for k, v in items(R) if v is not None] - return L - - -def is_iterable(obj): - try: - iter(obj) - except TypeError: - return False - return True - - -def fun_takes_kwargs(fun, kwlist=[]): - # deprecated - S = getattr(fun, 'argspec', getargspec(fun)) - if S.keywords is not None: - return kwlist - return [kw for kw in kwlist if kw in S.args] - - -def isatty(fh): - try: - return fh.isatty() - except AttributeError: - pass - - -def cry(out=None, sepchr='=', seplen=49): # pragma: no cover - """Return stacktrace of all active threads, - taken from https://gist.github.com/737056.""" - import threading - - out = WhateverIO() if out is None else out - P = partial(print, file=out) - - # get a map of threads by their ID so we can print their names - # during the traceback dump - tmap = dict((t.ident, t) for t in threading.enumerate()) - - sep = sepchr * seplen - for tid, frame in items(sys._current_frames()): - thread = tmap.get(tid) - if not thread: - # skip old junk (left-overs from a fork) - continue - P('{0.name}'.format(thread)) - P(sep) - traceback.print_stack(frame, file=out) - P(sep) - P('LOCAL VARIABLES') - P(sep) - pprint(frame.f_locals, stream=out) - P('\n') - return out.getvalue() - - -def maybe_reraise(): - """Re-raise if an exception is currently being handled, or return - otherwise.""" - exc_info = sys.exc_info() - try: - if exc_info[2]: - reraise(exc_info[0], exc_info[1], exc_info[2]) - finally: - # see http://docs.python.org/library/sys.html#sys.exc_info - del(exc_info) - - -def strtobool(term, table={'false': False, 'no': False, '0': False, - 'true': True, 'yes': True, '1': True, - 'on': True, 'off': False}): - """Convert common terms for true/false to bool - (true/false/yes/no/on/off/1/0).""" - if isinstance(term, string_t): - try: - return table[term.lower()] - except KeyError: - raise TypeError('Cannot coerce {0!r} to type bool'.format(term)) - return term - - -def jsonify(obj, - builtin_types=(numbers.Real, string_t), key=None, - keyfilter=None, - unknown_type_filter=None): - """Transforms object making it suitable for json serialization""" - from kombu.abstract import Object as KombuDictType - _jsonify = partial(jsonify, builtin_types=builtin_types, key=key, - keyfilter=keyfilter, - unknown_type_filter=unknown_type_filter) - - if isinstance(obj, KombuDictType): - obj = obj.as_dict(recurse=True) - - if obj is None or isinstance(obj, builtin_types): - return obj - elif isinstance(obj, (tuple, list)): - return [_jsonify(v) for v in obj] - elif isinstance(obj, dict): - return dict((k, _jsonify(v, key=k)) - for k, v in items(obj) - if (keyfilter(k) if keyfilter else 1)) - elif isinstance(obj, datetime.datetime): - # See "Date Time String Format" in the ECMA-262 specification. 
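# A standalone illustration of the datetime branch that follows: isoformat()
# output is trimmed from microseconds to milliseconds and a '+00:00' offset
# is rewritten as 'Z', per ECMA-262's date-time string format:
import datetime

dt = datetime.datetime(2018, 10, 29, 11, 30, 21, 123456,
                       tzinfo=datetime.timezone.utc)
r = dt.isoformat()        # '2018-10-29T11:30:21.123456+00:00'
if dt.microsecond:
    r = r[:23] + r[26:]   # keep milliseconds only
if r.endswith('+00:00'):
    r = r[:-6] + 'Z'
print(r)                  # '2018-10-29T11:30:21.123Z'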
- r = obj.isoformat() - if obj.microsecond: - r = r[:23] + r[26:] - if r.endswith('+00:00'): - r = r[:-6] + 'Z' - return r - elif isinstance(obj, datetime.date): - return obj.isoformat() - elif isinstance(obj, datetime.time): - r = obj.isoformat() - if obj.microsecond: - r = r[:12] - return r - elif isinstance(obj, datetime.timedelta): - return str(obj) - else: - if unknown_type_filter is None: - raise ValueError( - 'Unsupported type: {0!r} {1!r} (parent: {2})'.format( - type(obj), obj, key)) - return unknown_type_filter(obj) - - -def gen_task_name(app, name, module_name): - """Generate task name from name/module pair.""" - try: - module = sys.modules[module_name] - except KeyError: - # Fix for manage.py shell_plus (Issue #366) - module = None - - if module is not None: - module_name = module.__name__ - # - If the task module is used as the __main__ script - # - we need to rewrite the module part of the task name - # - to match App.main. - if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE: - # - see comment about :envvar:`MP_MAIN_FILE` above. - module_name = '__main__' - if module_name == '__main__' and app.main: - return '.'.join([app.main, name]) - return '.'.join(p for p in (module_name, name) if p) - - -def nodename(name, hostname): - """Create node name from name/hostname pair.""" - return NODENAME_SEP.join((name, hostname)) - - -def anon_nodename(hostname=None, prefix='gen'): - return nodename(''.join([prefix, str(os.getpid())]), - hostname or socket.gethostname()) - - -def nodesplit(nodename): - """Split node name into tuple of name/hostname.""" - parts = nodename.split(NODENAME_SEP, 1) - if len(parts) == 1: - return None, parts[0] - return parts - - -def default_nodename(hostname): - name, host = nodesplit(hostname or '') - return nodename(name or NODENAME_DEFAULT, host or socket.gethostname()) - - -def node_format(s, nodename, **extra): - name, host = nodesplit(nodename) - return host_format( - s, host, n=name or NODENAME_DEFAULT, **extra) - - -def _fmt_process_index(prefix='', default='0'): - from .log import current_process_index - index = current_process_index() - return '{0}{1}'.format(prefix, index) if index else default -_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') - - -def host_format(s, host=None, **extra): - host = host or socket.gethostname() - name, _, domain = host.partition('.') - keys = dict({ - 'h': host, 'n': name, 'd': domain, - 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, - }, **extra) - return simple_format(s, keys) - - -def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'): - if s: - keys.setdefault('%', '%') - - def resolve(match): - key = match.expand(expand) - try: - resolver = keys[key] - except KeyError: - raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s)) - if isinstance(resolver, Callable): - return resolver() - return resolver - - return pattern.sub(resolve, s) - return s - - -# ------------------------------------------------------------------------ # -# > XXX Compat -from .log import LOG_LEVELS # noqa -from .imports import ( # noqa - qualname as get_full_cls_name, symbol_by_name as get_cls_by_name, - instantiate, import_from_cwd -) -from .functional import chunks, noop # noqa -from kombu.utils import cached_property, kwdict, uuid # noqa -gen_unique_id = uuid diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py b/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py deleted file mode 100644 index 6f62964..0000000 --- 
a/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py +++ /dev/null @@ -1 +0,0 @@ -from celery.five import * # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py b/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py deleted file mode 100644 index 09c6ec8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.debug - ~~~~~~~~~~~~~~~~~~ - - Utilities for debugging memory usage. - -""" -from __future__ import absolute_import, print_function, unicode_literals - -import os - -from contextlib import contextmanager -from functools import partial - -from celery.five import range -from celery.platforms import signals - -try: - from psutil import Process -except ImportError: - Process = None # noqa - -__all__ = [ - 'blockdetection', 'sample_mem', 'memdump', 'sample', - 'humanbytes', 'mem_rss', 'ps', -] - -UNITS = ( - (2 ** 40.0, 'TB'), - (2 ** 30.0, 'GB'), - (2 ** 20.0, 'MB'), - (2 ** 10.0, 'kB'), - (0.0, '{0!d}b'), -) - -_process = None -_mem_sample = [] - - -def _on_blocking(signum, frame): - import inspect - raise RuntimeError( - 'Blocking detection timed-out at: {0}'.format( - inspect.getframeinfo(frame) - ) - ) - - -@contextmanager -def blockdetection(timeout): - """A timeout context using ``SIGALRM`` that can be used to detect blocking - functions.""" - if not timeout: - yield - else: - old_handler = signals['ALRM'] - old_handler = None if old_handler == _on_blocking else old_handler - - signals['ALRM'] = _on_blocking - - try: - yield signals.arm_alarm(timeout) - finally: - if old_handler: - signals['ALRM'] = old_handler - signals.reset_alarm() - - -def sample_mem(): - """Sample RSS memory usage. - - Statistics can then be output by calling :func:`memdump`. - - """ - current_rss = mem_rss() - _mem_sample.append(current_rss) - return current_rss - - -def _memdump(samples=10): - S = _mem_sample - prev = list(S) if len(S) <= samples else sample(S, samples) - _mem_sample[:] = [] - import gc - gc.collect() - after_collect = mem_rss() - return prev, after_collect - - -def memdump(samples=10, file=None): - """Dump memory statistics. - - Will print a sample of all RSS memory samples added by - calling :func:`sample_mem`, and in addition print - used RSS memory after :func:`gc.collect`. - - """ - say = partial(print, file=file) - if ps() is None: - say('- rss: (psutil not installed).') - return - prev, after_collect = _memdump(samples) - if prev: - say('- rss (sample):') - for mem in prev: - say('- > {0},'.format(mem)) - say('- rss (end): {0}.'.format(after_collect)) - - -def sample(x, n, k=0): - """Given a list `x` a sample of length ``n`` of that list is returned. - - E.g. if `n` is 10, and `x` has 100 items, a list of every 10th - item is returned. - - ``k`` can be used as offset. - - """ - j = len(x) // n - for _ in range(n): - try: - yield x[k] - except IndexError: - break - k += j - - -def hfloat(f, p=5): - """Convert float to value suitable for humans. - - :keyword p: Float precision. - - """ - i = int(f) - return i if i == f else '{0:.{p}}'.format(f, p=p) - - -def humanbytes(s): - """Convert bytes to human-readable form (e.g. 
kB, MB).""" - return next( - '{0}{1}'.format(hfloat(s / div if div else s), unit) - for div, unit in UNITS if s >= div - ) - - -def mem_rss(): - """Return RSS memory usage as a humanized string.""" - p = ps() - if p is not None: - return humanbytes(_process_memory_info(p).rss) - - -def ps(): - """Return the global :class:`psutil.Process` instance, - or :const:`None` if :mod:`psutil` is not installed.""" - global _process - if _process is None and Process is not None: - _process = Process(os.getpid()) - return _process - - -def _process_memory_info(process): - try: - return process.memory_info() - except AttributeError: - return process.get_memory_info() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py deleted file mode 100644 index b6e8d0b..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -from .signal import Signal - -__all__ = ['Signal'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py deleted file mode 100644 index cd818bb..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py +++ /dev/null @@ -1,286 +0,0 @@ -# -*- coding: utf-8 -*- -""" -"Safe weakrefs", originally from pyDispatcher. - -Provides a way to safely weakref any function, including bound methods (which -aren't handled by the core weakref module). -""" -from __future__ import absolute_import - -import sys -import traceback -import weakref - -__all__ = ['safe_ref'] - -PY3 = sys.version_info[0] == 3 - - -def safe_ref(target, on_delete=None): # pragma: no cover - """Return a *safe* weak reference to a callable target - - :param target: the object to be weakly referenced, if it's a - bound method reference, will create a :class:`BoundMethodWeakref`, - otherwise creates a simple :class:`weakref.ref`. - - :keyword on_delete: if provided, will have a hard reference stored - to the callable to be called after the safe reference - goes out of scope with the reference object, (either a - :class:`weakref.ref` or a :class:`BoundMethodWeakref`) as argument. - """ - if getattr(target, '__self__', None) is not None: - # Turn a bound method into a BoundMethodWeakref instance. - # Keep track of these instances for lookup by disconnect(). - assert hasattr(target, '__func__'), \ - """safe_ref target {0!r} has __self__, but no __func__: \ - don't know how to create reference""".format(target) - return get_bound_method_weakref(target=target, - on_delete=on_delete) - if callable(on_delete): - return weakref.ref(target, on_delete) - else: - return weakref.ref(target) - - -class BoundMethodWeakref(object): # pragma: no cover - """'Safe' and reusable weak references to instance methods. - - BoundMethodWeakref objects provide a mechanism for - referencing a bound method without requiring that the - method object itself (which is normally a transient - object) is kept alive. Instead, the BoundMethodWeakref - object keeps weak references to both the object and the - function which together define the instance method. - - .. attribute:: key - - the identity key for the reference, calculated - by the class's :meth:`calculate_key` method applied to the - target instance method - - .. 
attribute:: deletion_methods - - sequence of callable objects taking - single argument, a reference to this object which - will be called when *either* the target object or - target function is garbage collected (i.e. when - this object becomes invalid). These are specified - as the on_delete parameters of :func:`safe_ref` calls. - - .. attribute:: weak_self - - weak reference to the target object - - .. attribute:: weak_fun - - weak reference to the target function - - .. attribute:: _all_instances - - class attribute pointing to all live - BoundMethodWeakref objects indexed by the class's - `calculate_key(target)` method applied to the target - objects. This weak value dictionary is used to - short-circuit creation so that multiple references - to the same (object, function) pair produce the - same BoundMethodWeakref instance. - - """ - - _all_instances = weakref.WeakValueDictionary() - - def __new__(cls, target, on_delete=None, *arguments, **named): - """Create new instance or return current instance - - Basically this method of construction allows us to - short-circuit creation of references to already- - referenced instance methods. The key corresponding - to the target is calculated, and if there is already - an existing reference, that is returned, with its - deletionMethods attribute updated. Otherwise the - new instance is created and registered in the table - of already-referenced methods. - - """ - key = cls.calculate_key(target) - current = cls._all_instances.get(key) - if current is not None: - current.deletion_methods.append(on_delete) - return current - else: - base = super(BoundMethodWeakref, cls).__new__(cls) - cls._all_instances[key] = base - base.__init__(target, on_delete, *arguments, **named) - return base - - def __init__(self, target, on_delete=None): - """Return a weak-reference-like instance for a bound method - - :param target: the instance-method target for the weak - reference, must have `__self__` and `__func__` attributes - and be reconstructable via:: - - target.__func__.__get__(target.__self__) - - which is true of built-in instance methods. - - :keyword on_delete: optional callback which will be called - when this weak reference ceases to be valid - (i.e. either the object or the function is garbage - collected). Should take a single argument, - which will be passed a pointer to this object. - - """ - def remove(weak, self=self): - """Set self.is_dead to true when method or instance is destroyed""" - methods = self.deletion_methods[:] - del(self.deletion_methods[:]) - try: - del(self.__class__._all_instances[self.key]) - except KeyError: - pass - for function in methods: - try: - if callable(function): - function(self) - except Exception as exc: - try: - traceback.print_exc() - except AttributeError: - print('Exception during saferef {0} cleanup function ' - '{1}: {2}'.format(self, function, exc)) - - self.deletion_methods = [on_delete] - self.key = self.calculate_key(target) - self.weak_self = weakref.ref(target.__self__, remove) - self.weak_fun = weakref.ref(target.__func__, remove) - self.self_name = str(target.__self__) - self.fun_name = str(target.__func__.__name__) - - def calculate_key(cls, target): - """Calculate the reference key for this reference - - Currently this is a two-tuple of the `id()`'s of the - target object and the target function respectively. 
- """ - return id(target.__self__), id(target.__func__) - calculate_key = classmethod(calculate_key) - - def __str__(self): - """Give a friendly representation of the object""" - return '{0}( {1}.{2} )'.format( - type(self).__name__, - self.self_name, - self.fun_name, - ) - - __repr__ = __str__ - - def __bool__(self): - """Whether we are still a valid reference""" - return self() is not None - __nonzero__ = __bool__ # py2 - - if not PY3: - def __cmp__(self, other): - """Compare with another reference""" - if not isinstance(other, self.__class__): - return cmp(self.__class__, type(other)) # noqa - return cmp(self.key, other.key) # noqa - - def __call__(self): - """Return a strong reference to the bound method - - If the target cannot be retrieved, then will - return None, otherwise return a bound instance - method for our object and function. - - Note: - You may call this method any number of times, - as it does not invalidate the reference. - """ - target = self.weak_self() - if target is not None: - function = self.weak_fun() - if function is not None: - return function.__get__(target) - - -class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover - """A specialized :class:`BoundMethodWeakref`, for platforms where - instance methods are not descriptors. - - It assumes that the function name and the target attribute name are the - same, instead of assuming that the function is a descriptor. This approach - is equally fast, but not 100% reliable because functions can be stored on - an attribute named differenty than the function's name such as in:: - - >>> class A(object): - ... pass - - >>> def foo(self): - ... return 'foo' - >>> A.bar = foo - - But this shouldn't be a common use case. So, on platforms where methods - aren't descriptors (such as Jython) this implementation has the advantage - of working in the most cases. - - """ - def __init__(self, target, on_delete=None): - """Return a weak-reference-like instance for a bound method - - :param target: the instance-method target for the weak - reference, must have `__self__` and `__func__` attributes - and be reconstructable via:: - - target.__func__.__get__(target.__self__) - - which is true of built-in instance methods. - - :keyword on_delete: optional callback which will be called - when this weak reference ceases to be valid - (i.e. either the object or the function is garbage - collected). Should take a single argument, - which will be passed a pointer to this object. - - """ - assert getattr(target.__self__, target.__name__) == target - super(BoundNonDescriptorMethodWeakref, self).__init__(target, - on_delete) - - def __call__(self): - """Return a strong reference to the bound method - - If the target cannot be retrieved, then will - return None, otherwise return a bound instance - method for our object and function. - - Note: - You may call this method any number of times, - as it does not invalidate the reference. - - """ - target = self.weak_self() - if target is not None: - function = self.weak_fun() - if function is not None: - # Using curry() would be another option, but it erases the - # "signature" of the function. That is, after a function is - # curried, the inspect module can't be used to determine how - # many arguments the function expects, nor what keyword - # arguments it supports, and pydispatcher needs this - # information. 
- return getattr(target, function.__name__) - - -def get_bound_method_weakref(target, on_delete): # pragma: no cover - """Instantiates the appropiate :class:`BoundMethodWeakRef`, depending - on the details of the underlying class method implementation.""" - if hasattr(target, '__get__'): - # target method is a descriptor, so the default implementation works: - return BoundMethodWeakref(target=target, on_delete=on_delete) - else: - # no luck, use the alternative implementation: - return BoundNonDescriptorMethodWeakref(target=target, - on_delete=on_delete) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py deleted file mode 100644 index 7d4b337..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py +++ /dev/null @@ -1,241 +0,0 @@ -# -*- coding: utf-8 -*- -"""Signal class.""" -from __future__ import absolute_import - -import weakref -from . import saferef - -from celery.five import range -from celery.local import PromiseProxy, Proxy - -__all__ = ['Signal'] - -WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) - - -def _make_id(target): # pragma: no cover - if isinstance(target, Proxy): - target = target._get_current_object() - if hasattr(target, '__func__'): - return (id(target.__self__), id(target.__func__)) - return id(target) - - -class Signal(object): # pragma: no cover - """Base class for all signals - - - .. attribute:: receivers - Internal attribute, holds a dictionary of - `{receiverkey (id): weakref(receiver)}` mappings. - - """ - - def __init__(self, providing_args=None): - """Create a new signal. - - :param providing_args: A list of the arguments this signal can pass - along in a :meth:`send` call. - - """ - self.receivers = [] - if providing_args is None: - providing_args = [] - self.providing_args = set(providing_args) - - def _connect_proxy(self, fun, sender, weak, dispatch_uid): - return self.connect( - fun, sender=sender._get_current_object(), - weak=weak, dispatch_uid=dispatch_uid, - ) - - def connect(self, *args, **kwargs): - """Connect receiver to sender for signal. - - :param receiver: A function or an instance method which is to - receive signals. Receivers must be hashable objects. - - if weak is :const:`True`, then receiver must be weak-referencable - (more precisely :func:`saferef.safe_ref()` must be able to create a - reference to the receiver). - - Receivers must be able to accept keyword arguments. - - If receivers have a `dispatch_uid` attribute, the receiver will - not be added if another receiver already exists with that - `dispatch_uid`. - - :keyword sender: The sender to which the receiver should respond. - Must either be of type :class:`Signal`, or :const:`None` to receive - events from any sender. - - :keyword weak: Whether to use weak references to the receiver. - By default, the module will attempt to use weak references to the - receiver objects. If this parameter is false, then strong - references will be used. - - :keyword dispatch_uid: An identifier used to uniquely identify a - particular instance of a receiver. This will usually be a - string, though it may be anything hashable. 
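A short usage sketch of the `connect`/`send` API described here (names are illustrative; `send` further down calls each receiver as `receiver(signal=..., sender=..., **named)`):

    from celery.utils.dispatch.signal import Signal

    task_done = Signal(providing_args=['result'])

    def log_result(signal=None, sender=None, result=None, **kwargs):
        print('task finished with', result)

    task_done.connect(log_result)            # weak=True is the default
    task_done.send(sender=None, result=42)   # -> [(log_result, None)]

`connect` also works as a decorator, since a callable first argument is treated as the receiver.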
- - """ - def _handle_options(sender=None, weak=True, dispatch_uid=None): - - def _connect_signal(fun): - receiver = fun - - if isinstance(sender, PromiseProxy): - sender.__then__( - self._connect_proxy, fun, sender, weak, dispatch_uid, - ) - return fun - - if dispatch_uid: - lookup_key = (dispatch_uid, _make_id(sender)) - else: - lookup_key = (_make_id(receiver), _make_id(sender)) - - if weak: - receiver = saferef.safe_ref( - receiver, on_delete=self._remove_receiver, - ) - - for r_key, _ in self.receivers: - if r_key == lookup_key: - break - else: - self.receivers.append((lookup_key, receiver)) - - return fun - - return _connect_signal - - if args and callable(args[0]): - return _handle_options(*args[1:], **kwargs)(args[0]) - return _handle_options(*args, **kwargs) - - def disconnect(self, receiver=None, sender=None, weak=True, - dispatch_uid=None): - """Disconnect receiver from sender for signal. - - If weak references are used, disconnect need not be called. The - receiver will be removed from dispatch automatically. - - :keyword receiver: The registered receiver to disconnect. May be - none if `dispatch_uid` is specified. - - :keyword sender: The registered sender to disconnect. - - :keyword weak: The weakref state to disconnect. - - :keyword dispatch_uid: the unique identifier of the receiver - to disconnect - - """ - if dispatch_uid: - lookup_key = (dispatch_uid, _make_id(sender)) - else: - lookup_key = (_make_id(receiver), _make_id(sender)) - - for index in range(len(self.receivers)): - (r_key, _) = self.receivers[index] - if r_key == lookup_key: - del self.receivers[index] - break - - def send(self, sender, **named): - """Send signal from sender to all connected receivers. - - If any receiver raises an error, the error propagates back through - send, terminating the dispatch loop, so it is quite possible to not - have all receivers called if a raises an error. - - :param sender: The sender of the signal. Either a specific - object or :const:`None`. - - :keyword \*\*named: Named arguments which will be passed to receivers. - - :returns: a list of tuple pairs: `[(receiver, response), … ]`. - - """ - responses = [] - if not self.receivers: - return responses - - for receiver in self._live_receivers(_make_id(sender)): - response = receiver(signal=self, sender=sender, **named) - responses.append((receiver, response)) - return responses - - def send_robust(self, sender, **named): - """Send signal from sender to all connected receivers catching errors. - - :param sender: The sender of the signal. Can be any python object - (normally one registered with a connect if you actually want - something to occur). - - :keyword \*\*named: Named arguments which will be passed to receivers. - These arguments must be a subset of the argument names defined in - :attr:`providing_args`. - - :returns: a list of tuple pairs: `[(receiver, response), … ]`. - - :raises DispatcherKeyError: - - if any receiver raises an error (specifically any subclass of - :exc:`Exception`), the error instance is returned as the result - for that receiver. - - """ - responses = [] - if not self.receivers: - return responses - - # Call each receiver with whatever arguments it can accept. - # Return a list of tuple pairs [(receiver, response), … ]. 
- for receiver in self._live_receivers(_make_id(sender)): - try: - response = receiver(signal=self, sender=sender, **named) - except Exception as err: - responses.append((receiver, err)) - else: - responses.append((receiver, response)) - return responses - - def _live_receivers(self, senderkey): - """Filter sequence of receivers to get resolved, live receivers. - - This checks for weak references and resolves them, then returning only - live receivers. - - """ - none_senderkey = _make_id(None) - receivers = [] - - for (receiverkey, r_senderkey), receiver in self.receivers: - if r_senderkey == none_senderkey or r_senderkey == senderkey: - if isinstance(receiver, WEAKREF_TYPES): - # Dereference the weak reference. - receiver = receiver() - if receiver is not None: - receivers.append(receiver) - else: - receivers.append(receiver) - return receivers - - def _remove_receiver(self, receiver): - """Remove dead receivers from connections.""" - - to_remove = [] - for key, connected_receiver in self.receivers: - if connected_receiver == receiver: - to_remove.append(key) - for key in to_remove: - for idx, (r_key, _) in enumerate(self.receivers): - if r_key == key: - del self.receivers[idx] - - def __repr__(self): - return ''.format(type(self).__name__) - - __str__ = __repr__ diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py b/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py deleted file mode 100644 index 3ddcd35..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.encoding - ~~~~~~~~~~~~~~~~~~~~~ - - This module has moved to :mod:`kombu.utils.encoding`. - -""" -from __future__ import absolute_import - -from kombu.utils.encoding import ( # noqa - default_encode, default_encoding, bytes_t, bytes_to_str, str_t, - str_to_bytes, ensure_bytes, from_utf8, safe_str, safe_repr, -) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py b/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py deleted file mode 100644 index e55b812..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py +++ /dev/null @@ -1,323 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.functional - ~~~~~~~~~~~~~~~~~~~~~~~ - - Utilities for functions. - -""" -from __future__ import absolute_import - -import sys -import threading - -from functools import wraps -from itertools import islice - -from kombu.utils import cached_property -from kombu.utils.functional import lazy, maybe_evaluate, is_list, maybe_list -from kombu.utils.compat import OrderedDict - -from celery.five import UserDict, UserList, items, keys, range - -__all__ = ['LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop', - 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq', - 'regen', 'dictfilter', 'lazy', 'maybe_evaluate'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -KEYWORD_MARK = object() - - -class DummyContext(object): - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - pass - - -class LRUCache(UserDict): - """LRU Cache implementation using a doubly linked list to track access. - - :keyword limit: The maximum number of keys to keep in the cache. - When a new key is inserted and the limit has been exceeded, - the *Least Recently Used* key will be discarded from the - cache. 
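To make the limit semantics concrete, a small sketch of this cache (note that `__getitem__` re-inserts the key, which is what makes eviction *least recently used* rather than first-in first-out):

    from celery.utils.functional import LRUCache

    cache = LRUCache(limit=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']              # refreshes 'a'; 'b' is now least recently used
    cache['c'] = 3          # over the limit: 'b' is evicted
    assert sorted(cache.keys()) == ['a', 'c']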
- - """ - - def __init__(self, limit=None): - self.limit = limit - self.mutex = threading.RLock() - self.data = OrderedDict() - - def __getitem__(self, key): - with self.mutex: - value = self[key] = self.data.pop(key) - return value - - def update(self, *args, **kwargs): - with self.mutex: - data, limit = self.data, self.limit - data.update(*args, **kwargs) - if limit and len(data) > limit: - # pop additional items in case limit exceeded - for _ in range(len(data) - limit): - data.popitem(last=False) - - def popitem(self, last=True): - with self.mutex: - return self.data.popitem(last) - - def __setitem__(self, key, value): - # remove least recently used key. - with self.mutex: - if self.limit and len(self.data) >= self.limit: - self.data.pop(next(iter(self.data))) - self.data[key] = value - - def __iter__(self): - return iter(self.data) - - def _iterate_items(self, _need_lock=IS_PYPY): - with self.mutex if _need_lock else DummyContext(): - for k in self: - try: - yield (k, self.data[k]) - except KeyError: # pragma: no cover - pass - iteritems = _iterate_items - - def _iterate_values(self, _need_lock=IS_PYPY): - with self.mutex if _need_lock else DummyContext(): - for k in self: - try: - yield self.data[k] - except KeyError: # pragma: no cover - pass - - itervalues = _iterate_values - - def _iterate_keys(self): - # userdict.keys in py3k calls __getitem__ - return keys(self.data) - iterkeys = _iterate_keys - - def incr(self, key, delta=1): - with self.mutex: - # this acts as memcached does- store as a string, but return a - # integer as long as it exists and we can cast it - newval = int(self.data.pop(key)) + delta - self[key] = str(newval) - return newval - - def __getstate__(self): - d = dict(vars(self)) - d.pop('mutex') - return d - - def __setstate__(self, state): - self.__dict__ = state - self.mutex = threading.RLock() - - if sys.version_info[0] == 3: # pragma: no cover - keys = _iterate_keys - values = _iterate_values - items = _iterate_items - else: # noqa - - def keys(self): - return list(self._iterate_keys()) - - def values(self): - return list(self._iterate_values()) - - def items(self): - return list(self._iterate_items()) - - -def memoize(maxsize=None, keyfun=None, Cache=LRUCache): - - def _memoize(fun): - mutex = threading.Lock() - cache = Cache(limit=maxsize) - - @wraps(fun) - def _M(*args, **kwargs): - if keyfun: - key = keyfun(args, kwargs) - else: - key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items())) - try: - with mutex: - value = cache[key] - except KeyError: - value = fun(*args, **kwargs) - _M.misses += 1 - with mutex: - cache[key] = value - else: - _M.hits += 1 - return value - - def clear(): - """Clear the cache and reset cache statistics.""" - cache.clear() - _M.hits = _M.misses = 0 - - _M.hits = _M.misses = 0 - _M.clear = clear - _M.original_func = fun - return _M - - return _memoize - - -class mlazy(lazy): - """Memoized lazy evaluation. - - The function is only evaluated once, every subsequent access - will return the same value. - - .. attribute:: evaluated - - Set to to :const:`True` after the object has been evaluated. - - """ - evaluated = False - _value = None - - def evaluate(self): - if not self.evaluated: - self._value = super(mlazy, self).evaluate() - self.evaluated = True - return self._value - - -def noop(*args, **kwargs): - """No operation. - - Takes any arguments/keyword arguments and does nothing. - - """ - pass - - -def first(predicate, it): - """Return the first element in `iterable` that `predicate` Gives a - :const:`True` value for. 
- - If `predicate` is None it will return the first item that is not None. - - """ - return next( - (v for v in it if (predicate(v) if predicate else v is not None)), - None, - ) - - -def firstmethod(method): - """Return a function that with a list of instances, - finds the first instance that gives a value for the given method. - - The list can also contain lazy instances - (:class:`~kombu.utils.functional.lazy`.) - - """ - - def _matcher(it, *args, **kwargs): - for obj in it: - try: - answer = getattr(maybe_evaluate(obj), method)(*args, **kwargs) - except AttributeError: - pass - else: - if answer is not None: - return answer - - return _matcher - - -def chunks(it, n): - """Split an iterator into chunks with `n` elements each. - - Examples - - # n == 2 - >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) - >>> list(x) - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] - - # n == 3 - >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) - >>> list(x) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] - - """ - # XXX This function is not used anymore, at least not by Celery itself. - for first in it: - yield [first] + list(islice(it, n - 1)) - - -def padlist(container, size, default=None): - """Pad list with default elements. - - Examples: - - >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3) - ('George', 'Costanza', 'NYC') - >>> first, last, city = padlist(['George', 'Costanza'], 3) - ('George', 'Costanza', None) - >>> first, last, city, planet = padlist( - ... ['George', 'Costanza', 'NYC'], 4, default='Earth', - ... ) - ('George', 'Costanza', 'NYC', 'Earth') - - """ - return list(container)[:size] + [default] * (size - len(container)) - - -def mattrgetter(*attrs): - """Like :func:`operator.itemgetter` but return :const:`None` on missing - attributes instead of raising :exc:`AttributeError`.""" - return lambda obj: dict((attr, getattr(obj, attr, None)) - for attr in attrs) - - -def uniq(it): - """Return all unique elements in ``it``, preserving order.""" - seen = set() - return (seen.add(obj) or obj for obj in it if obj not in seen) - - -def regen(it): - """Regen takes any iterable, and if the object is an - generator it will cache the evaluated list on first access, - so that the generator can be "consumed" multiple times.""" - if isinstance(it, (list, tuple)): - return it - return _regen(it) - - -class _regen(UserList, list): - # must be subclass of list so that json can encode. - def __init__(self, it): - self.__it = it - - def __reduce__(self): - return list, (self.data, ) - - def __length_hint__(self): - return self.__it.__length_hint__() - - @cached_property - def data(self): - return list(self.__it) - - -def dictfilter(d=None, **kw): - """Remove all keys from dict ``d`` whose value is :const:`None`""" - d = kw if d is None else (dict(d, **kw) if kw else d) - return dict((k, v) for k, v in items(d) if v is not None) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py b/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py deleted file mode 100644 index 22a2fdc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.import - ~~~~~~~~~~~~~~~~~~~ - - Utilities related to importing modules and symbols by name. 
- -""" -from __future__ import absolute_import - -import imp as _imp -import importlib -import os -import sys - -from contextlib import contextmanager - -from kombu.utils import symbol_by_name - -from celery.five import reload - -__all__ = [ - 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', - 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', -] - - -class NotAPackage(Exception): - pass - - -if sys.version_info > (3, 3): # pragma: no cover - def qualname(obj): - if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): - obj = obj.__class__ - q = getattr(obj, '__qualname__', None) - if '.' not in q: - q = '.'.join((obj.__module__, q)) - return q -else: - def qualname(obj): # noqa - if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): - obj = obj.__class__ - return '.'.join((obj.__module__, obj.__name__)) - - -def instantiate(name, *args, **kwargs): - """Instantiate class by name. - - See :func:`symbol_by_name`. - - """ - return symbol_by_name(name)(*args, **kwargs) - - -@contextmanager -def cwd_in_path(): - cwd = os.getcwd() - if cwd in sys.path: - yield - else: - sys.path.insert(0, cwd) - try: - yield cwd - finally: - try: - sys.path.remove(cwd) - except ValueError: # pragma: no cover - pass - - -def find_module(module, path=None, imp=None): - """Version of :func:`imp.find_module` supporting dots.""" - if imp is None: - imp = importlib.import_module - with cwd_in_path(): - if '.' in module: - last = None - parts = module.split('.') - for i, part in enumerate(parts[:-1]): - mpart = imp('.'.join(parts[:i + 1])) - try: - path = mpart.__path__ - except AttributeError: - raise NotAPackage(module) - last = _imp.find_module(parts[i + 1], path) - return last - return _imp.find_module(module) - - -def import_from_cwd(module, imp=None, package=None): - """Import module, but make sure it finds modules - located in the current directory. - - Modules located in the current directory has - precedence over modules located in `sys.path`. 
- """ - if imp is None: - imp = importlib.import_module - with cwd_in_path(): - return imp(module, package=package) - - -def reload_from_cwd(module, reloader=None): - if reloader is None: - reloader = reload - with cwd_in_path(): - return reloader(module) - - -def module_file(module): - """Return the correct original file name of a module.""" - name = module.__file__ - return name[:-1] if name.endswith('.pyc') else name diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py b/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py deleted file mode 100644 index c951cf6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Originally taken from pyiso8601 (http://code.google.com/p/pyiso8601/) - -Modified to match the behavior of dateutil.parser: - - - raise ValueError instead of ParseError - - return naive datetimes by default - - uses pytz.FixedOffset - -This is the original License: - -Copyright (c) 2007 Michael Twomey - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import - -import re - -from datetime import datetime -from pytz import FixedOffset - -__all__ = ['parse_iso8601'] - -# Adapted from http://delete.me.uk/2005/03/iso8601.html -ISO8601_REGEX = re.compile( - r'(?P[0-9]{4})(-(?P[0-9]{1,2})(-(?P[0-9]{1,2})' - r'((?P.)(?P[0-9]{2}):(?P[0-9]{2})' - '(:(?P[0-9]{2})(\.(?P[0-9]+))?)?' - r'(?PZ|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?' 
-) -TIMEZONE_REGEX = re.compile( - '(?P[+-])(?P[0-9]{2}).(?P[0-9]{2})' -) - - -def parse_iso8601(datestring): - """Parse and convert ISO 8601 string into a datetime object""" - m = ISO8601_REGEX.match(datestring) - if not m: - raise ValueError('unable to parse date string %r' % datestring) - groups = m.groupdict() - tz = groups['timezone'] - if tz == 'Z': - tz = FixedOffset(0) - elif tz: - m = TIMEZONE_REGEX.match(tz) - prefix, hours, minutes = m.groups() - hours, minutes = int(hours), int(minutes) - if prefix == '-': - hours = -hours - minutes = -minutes - tz = FixedOffset(minutes + hours * 60) - frac = groups['fraction'] or 0 - return datetime( - int(groups['year']), int(groups['month']), int(groups['day']), - int(groups['hour']), int(groups['minute']), int(groups['second']), - int(frac), tz - ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/log.py b/thesisenv/lib/python3.6/site-packages/celery/utils/log.py deleted file mode 100644 index b786d39..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/log.py +++ /dev/null @@ -1,301 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.log - ~~~~~~~~~~~~~~~~ - - Logging utilities. - -""" -from __future__ import absolute_import, print_function - -import logging -import numbers -import os -import sys -import threading -import traceback - -from contextlib import contextmanager -from billiard import current_process, util as mputil -from kombu.five import values -from kombu.log import get_logger as _get_logger, LOG_LEVELS -from kombu.utils.encoding import safe_str - -from celery.five import string_t, text_t - -from .term import colored - -__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger', - 'set_in_sighandler', 'in_sighandler', 'get_logger', - 'get_task_logger', 'mlevel', 'ensure_process_aware_logger', - 'get_multiprocessing_logger', 'reset_multiprocessing_logger'] - -_process_aware = False -PY3 = sys.version_info[0] == 3 - -MP_LOG = os.environ.get('MP_LOG', False) - - -# Sets up our logging hierarchy. -# -# Every logger in the celery package inherits from the "celery" -# logger, and every task logger inherits from the "celery.task" -# logger. 
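The ISO 8601 parser earlier in this hunk reads those named groups back out of the match; a minimal usage sketch, assuming `pytz` is installed (the module imports `FixedOffset` from it):

    from celery.utils.iso8601 import parse_iso8601

    parse_iso8601('2018-10-29T11:30:21')        # naive datetime
    parse_iso8601('2018-10-29T11:30:21+01:00')  # tzinfo=pytz.FixedOffset(60)
    parse_iso8601('not a date')                 # raises ValueError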
-base_logger = logger = _get_logger('celery') -mp_logger = _get_logger('multiprocessing') - -_in_sighandler = False - - -def set_in_sighandler(value): - global _in_sighandler - _in_sighandler = value - - -def iter_open_logger_fds(): - seen = set() - loggers = (list(values(logging.Logger.manager.loggerDict)) + - [logging.getLogger(None)]) - for logger in loggers: - try: - for handler in logger.handlers: - try: - if handler not in seen: - yield handler.stream - seen.add(handler) - except AttributeError: - pass - except AttributeError: # PlaceHolder does not have handlers - pass - - -@contextmanager -def in_sighandler(): - set_in_sighandler(True) - try: - yield - finally: - set_in_sighandler(False) - - -def logger_isa(l, p, max=1000): - this, seen = l, set() - for _ in range(max): - if this == p: - return True - else: - if this in seen: - raise RuntimeError( - 'Logger {0!r} parents recursive'.format(l), - ) - seen.add(this) - this = this.parent - if not this: - break - else: - raise RuntimeError('Logger hierarchy exceeds {0}'.format(max)) - return False - - -def get_logger(name): - l = _get_logger(name) - if logging.root not in (l, l.parent) and l is not base_logger: - if not logger_isa(l, base_logger): - l.parent = base_logger - return l -task_logger = get_logger('celery.task') -worker_logger = get_logger('celery.worker') - - -def get_task_logger(name): - logger = get_logger(name) - if not logger_isa(logger, task_logger): - logger.parent = task_logger - return logger - - -def mlevel(level): - if level and not isinstance(level, numbers.Integral): - return LOG_LEVELS[level.upper()] - return level - - -class ColorFormatter(logging.Formatter): - #: Loglevel -> Color mapping. - COLORS = colored().names - colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'], - 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']} - - def __init__(self, fmt=None, use_color=True): - logging.Formatter.__init__(self, fmt) - self.use_color = use_color - - def formatException(self, ei): - if ei and not isinstance(ei, tuple): - ei = sys.exc_info() - r = logging.Formatter.formatException(self, ei) - if isinstance(r, str) and not PY3: - return safe_str(r) - return r - - def format(self, record): - msg = logging.Formatter.format(self, record) - color = self.colors.get(record.levelname) - - # reset exception info later for other handlers... - einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info - - if color and self.use_color: - try: - # safe_str will repr the color object - # and color will break on non-string objects - # so need to reorder calls based on type. - # Issue #427 - try: - if isinstance(msg, string_t): - return text_t(color(safe_str(msg))) - return safe_str(color(msg)) - except UnicodeDecodeError: - return safe_str(msg) # skip colors - except Exception as exc: - prev_msg, record.exc_info, record.msg = ( - record.msg, 1, ''.format( - type(msg), exc - ), - ) - try: - return logging.Formatter.format(self, record) - finally: - record.msg, record.exc_info = prev_msg, einfo - else: - return safe_str(msg) - - -class LoggingProxy(object): - """Forward file object to :class:`logging.Logger` instance. - - :param logger: The :class:`logging.Logger` instance to forward to. - :param loglevel: Loglevel to use when writing messages. 
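A sketch of the reparenting that `get_task_logger` above performs: any logger it hands out ends up below `celery.task`, so records propagate through the celery tree ('myapp.tasks' is an arbitrary name):

    from celery.utils.log import get_task_logger

    logger = get_task_logger('myapp.tasks')
    assert logger.parent.name == 'celery.task'
    logger.info('propagates through celery.task, then celery')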
- - """ - mode = 'w' - name = None - closed = False - loglevel = logging.ERROR - _thread = threading.local() - - def __init__(self, logger, loglevel=None): - self.logger = logger - self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) - self._safewrap_handlers() - - def _safewrap_handlers(self): - """Make the logger handlers dump internal errors to - `sys.__stderr__` instead of `sys.stderr` to circumvent - infinite loops.""" - - def wrap_handler(handler): # pragma: no cover - - class WithSafeHandleError(logging.Handler): - - def handleError(self, record): - exc_info = sys.exc_info() - try: - try: - traceback.print_exception(exc_info[0], - exc_info[1], - exc_info[2], - None, sys.__stderr__) - except IOError: - pass # see python issue 5971 - finally: - del(exc_info) - - handler.handleError = WithSafeHandleError().handleError - return [wrap_handler(h) for h in self.logger.handlers] - - def write(self, data): - """Write message to logging object.""" - if _in_sighandler: - return print(safe_str(data), file=sys.__stderr__) - if getattr(self._thread, 'recurse_protection', False): - # Logger is logging back to this file, so stop recursing. - return - data = data.strip() - if data and not self.closed: - self._thread.recurse_protection = True - try: - self.logger.log(self.loglevel, safe_str(data)) - finally: - self._thread.recurse_protection = False - - def writelines(self, sequence): - """`writelines(sequence_of_strings) -> None`. - - Write the strings to the file. - - The sequence can be any iterable object producing strings. - This is equivalent to calling :meth:`write` for each string. - - """ - for part in sequence: - self.write(part) - - def flush(self): - """This object is not buffered so any :meth:`flush` requests - are ignored.""" - pass - - def close(self): - """When the object is closed, no write requests are forwarded to - the logging object anymore.""" - self.closed = True - - def isatty(self): - """Always return :const:`False`. 
Just here for file support.""" - return False - - -def ensure_process_aware_logger(force=False): - """Make sure process name is recorded when loggers are used.""" - global _process_aware - if force or not _process_aware: - logging._acquireLock() - try: - _process_aware = True - Logger = logging.getLoggerClass() - if getattr(Logger, '_process_aware', False): # pragma: no cover - return - - class ProcessAwareLogger(Logger): - _signal_safe = True - _process_aware = True - - def makeRecord(self, *args, **kwds): - record = Logger.makeRecord(self, *args, **kwds) - record.processName = current_process()._name - return record - - def log(self, *args, **kwargs): - if _in_sighandler: - return - return Logger.log(self, *args, **kwargs) - logging.setLoggerClass(ProcessAwareLogger) - finally: - logging._releaseLock() - - -def get_multiprocessing_logger(): - return mputil.get_logger() if mputil else None - - -def reset_multiprocessing_logger(): - if mputil and hasattr(mputil, '_logger'): - mputil._logger = None - - -def current_process_index(base=1): - if current_process: - index = getattr(current_process(), 'index', None) - return index + base if index is not None else index -ensure_process_aware_logger() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py b/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py deleted file mode 100644 index 00c5f29..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.mail - ~~~~~~~~~~~~~~~~~ - - How task error emails are formatted and sent. - -""" -from __future__ import absolute_import - -import smtplib -import socket -import traceback -import warnings - -from email.mime.text import MIMEText - -from .functional import maybe_list - -try: - from ssl import SSLError -except ImportError: # pragma: no cover - class SSLError(Exception): # noqa - """fallback used when ssl module not compiled.""" - -__all__ = ['SendmailWarning', 'Message', 'Mailer', 'ErrorMail'] - -_local_hostname = None - - -def get_local_hostname(): - global _local_hostname - if _local_hostname is None: - _local_hostname = socket.getfqdn() - return _local_hostname - - -class SendmailWarning(UserWarning): - """Problem happened while sending the email message.""" - - -class Message(object): - - def __init__(self, to=None, sender=None, subject=None, - body=None, charset='us-ascii'): - self.to = maybe_list(to) - self.sender = sender - self.subject = subject - self.body = body - self.charset = charset - - def __repr__(self): - return ''.format(self) - - def __str__(self): - msg = MIMEText(self.body, 'plain', self.charset) - msg['Subject'] = self.subject - msg['From'] = self.sender - msg['To'] = ', '.join(self.to) - return msg.as_string() - - -class Mailer(object): - - def __init__(self, host='localhost', port=0, user=None, password=None, - timeout=2, use_ssl=False, use_tls=False): - self.host = host - self.port = port - self.user = user - self.password = password - self.timeout = timeout - self.use_ssl = use_ssl - self.use_tls = use_tls - - def send(self, message, fail_silently=False, **kwargs): - try: - self._send(message, **kwargs) - except Exception as exc: - if not fail_silently: - raise - warnings.warn(SendmailWarning( - 'Mail could not be sent: {0!r} {1!r}\n{2!r}'.format( - exc, {'To': ', '.join(message.to), - 'Subject': message.subject}, - traceback.format_stack()))) - - def _send(self, message, **kwargs): - Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP - client = 
Client(self.host, self.port, timeout=self.timeout, - local_hostname=get_local_hostname(), **kwargs) - - if self.use_tls: - client.ehlo() - client.starttls() - client.ehlo() - - if self.user and self.password: - client.login(self.user, self.password) - - client.sendmail(message.sender, message.to, str(message)) - try: - client.quit() - except SSLError: - client.close() - - -class ErrorMail(object): - """Defines how and when task error e-mails should be sent. - - :param task: The task instance that raised the error. - - :attr:`subject` and :attr:`body` are format strings which - are passed a context containing the following keys: - - * name - - Name of the task. - - * id - - UUID of the task. - - * exc - - String representation of the exception. - - * args - - Positional arguments. - - * kwargs - - Keyword arguments. - - * traceback - - String representation of the traceback. - - * hostname - - Worker nodename. - - """ - - # pep8.py borks on a inline signature separator and - # says "trailing whitespace" ;) - EMAIL_SIGNATURE_SEP = '-- ' - - #: Format string used to generate error email subjects. - subject = """\ - [{hostname}] Error: Task {name} ({id}): {exc!r} - """ - - #: Format string used to generate error email content. - body = """ -Task {{name}} with id {{id}} raised exception:\n{{exc!r}} - - -Task was called with args: {{args}} kwargs: {{kwargs}}. - -The contents of the full traceback was: - -{{traceback}} - -{EMAIL_SIGNATURE_SEP} -Just to let you know, -py-celery at {{hostname}}. -""".format(EMAIL_SIGNATURE_SEP=EMAIL_SIGNATURE_SEP) - - def __init__(self, task, **kwargs): - self.task = task - self.subject = kwargs.get('subject', self.subject) - self.body = kwargs.get('body', self.body) - - def should_send(self, context, exc): - """Return true or false depending on if a task error mail - should be sent for this type of error.""" - return True - - def format_subject(self, context): - return self.subject.strip().format(**context) - - def format_body(self, context): - return self.body.strip().format(**context) - - def send(self, context, exc, fail_silently=True): - if self.should_send(context, exc): - self.task.app.mail_admins(self.format_subject(context), - self.format_body(context), - fail_silently=fail_silently) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py b/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py deleted file mode 100644 index 8a2f7f6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.objects - ~~~~~~~~~~~~~~~~~~~~ - - Object related utilities including introspection, etc. - -""" -from __future__ import absolute_import - -__all__ = ['mro_lookup'] - - -class Bunch(object): - """Object that enables you to modify attributes.""" - - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - -def mro_lookup(cls, attr, stop=(), monkey_patched=[]): - """Return the first node by MRO order that defines an attribute. - - :keyword stop: A list of types that if reached will stop the search. - :keyword monkey_patched: Use one of the stop classes if the attr's - module origin is not in this list, this to detect monkey patched - attributes. - - :returns None: if the attribute was not found. 
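`mro_lookup` reports *which class* along the MRO provides an attribute, rather than the attribute itself; a sketch with illustrative classes:

    from celery.utils.objects import mro_lookup

    class Base(object):
        def handler(self):
            pass

    class Child(Base):
        pass

    mro_lookup(Child, 'handler')   # -> Base, the first class defining it
    mro_lookup(Child, 'missing')   # -> None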
- - """ - for node in cls.mro(): - if node in stop: - try: - attr = node.__dict__[attr] - module_origin = attr.__module__ - except (AttributeError, KeyError): - pass - else: - if module_origin not in monkey_patched: - return node - return - if attr in node.__dict__: - return node - - -class FallbackContext(object): - """The built-in ``@contextmanager`` utility does not work well - when wrapping other contexts, as the traceback is wrong when - the wrapped context raises. - - This solves this problem and can be used instead of ``@contextmanager`` - in this example:: - - @contextmanager - def connection_or_default_connection(connection=None): - if connection: - # user already has a connection, should not close - # after use - yield connection - else: - # must have new connection, and also close the connection - # after the block returns - with create_new_connection() as connection: - yield connection - - This wrapper can be used instead for the above like this:: - - def connection_or_default_connection(connection=None): - return FallbackContext(connection, create_new_connection) - - """ - - def __init__(self, provided, fallback, *fb_args, **fb_kwargs): - self.provided = provided - self.fallback = fallback - self.fb_args = fb_args - self.fb_kwargs = fb_kwargs - self._context = None - - def __enter__(self): - if self.provided is not None: - return self.provided - context = self._context = self.fallback( - *self.fb_args, **self.fb_kwargs - ).__enter__() - return context - - def __exit__(self, *exc_info): - if self._context is not None: - return self._context.__exit__(*exc_info) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py deleted file mode 100644 index d5509f1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.serialization - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Utilities for safely pickling exceptions. - -""" -from __future__ import absolute_import - -from inspect import getmro -from itertools import takewhile - -try: - import cPickle as pickle -except ImportError: - import pickle # noqa - -from .encoding import safe_repr - -__all__ = ['UnpickleableExceptionWrapper', 'subclass_exception', - 'find_pickleable_exception', 'create_exception_cls', - 'get_pickleable_exception', 'get_pickleable_etype', - 'get_pickled_exception'] - -#: List of base classes we probably don't want to reduce to. -try: - unwanted_base_classes = (StandardError, Exception, BaseException, object) -except NameError: # pragma: no cover - unwanted_base_classes = (Exception, BaseException, object) # py3k - - -def subclass_exception(name, parent, module): # noqa - return type(name, (parent, ), {'__module__': module}) - - -def find_pickleable_exception(exc, loads=pickle.loads, - dumps=pickle.dumps): - """With an exception instance, iterate over its super classes (by mro) - and find the first super exception that is pickleable. It does - not go below :exc:`Exception` (i.e. it skips :exc:`Exception`, - :class:`BaseException` and :class:`object`). If that happens - you should use :exc:`UnpickleableException` instead. - - :param exc: An exception instance. - - Will return the nearest pickleable parent exception class - (except :exc:`Exception` and parents), or if the exception is - pickleable it will return :const:`None`. 
- - :rtype :exc:`Exception`: - - """ - exc_args = getattr(exc, 'args', []) - for supercls in itermro(exc.__class__, unwanted_base_classes): - try: - superexc = supercls(*exc_args) - loads(dumps(superexc)) - except: - pass - else: - return superexc -find_nearest_pickleable_exception = find_pickleable_exception # XXX compat - - -def itermro(cls, stop): - return takewhile(lambda sup: sup not in stop, getmro(cls)) - - -def create_exception_cls(name, module, parent=None): - """Dynamically create an exception class.""" - if not parent: - parent = Exception - return subclass_exception(name, parent, module) - - -class UnpickleableExceptionWrapper(Exception): - """Wraps unpickleable exceptions. - - :param exc_module: see :attr:`exc_module`. - :param exc_cls_name: see :attr:`exc_cls_name`. - :param exc_args: see :attr:`exc_args` - - **Example** - - .. code-block:: python - - >>> def pickle_it(raising_function): - ... try: - ... raising_function() - ... except Exception as e: - ... exc = UnpickleableExceptionWrapper( - ... e.__class__.__module__, - ... e.__class__.__name__, - ... e.args, - ... ) - ... pickle.dumps(exc) # Works fine. - - """ - - #: The module of the original exception. - exc_module = None - - #: The name of the original exception class. - exc_cls_name = None - - #: The arguments for the original exception. - exc_args = None - - def __init__(self, exc_module, exc_cls_name, exc_args, text=None): - safe_exc_args = [] - for arg in exc_args: - try: - pickle.dumps(arg) - safe_exc_args.append(arg) - except Exception: - safe_exc_args.append(safe_repr(arg)) - self.exc_module = exc_module - self.exc_cls_name = exc_cls_name - self.exc_args = safe_exc_args - self.text = text - Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text) - - def restore(self): - return create_exception_cls(self.exc_cls_name, - self.exc_module)(*self.exc_args) - - def __str__(self): - return self.text - - @classmethod - def from_exception(cls, exc): - return cls(exc.__class__.__module__, - exc.__class__.__name__, - getattr(exc, 'args', []), - safe_repr(exc)) - - -def get_pickleable_exception(exc): - """Make sure exception is pickleable.""" - try: - pickle.loads(pickle.dumps(exc)) - except Exception: - pass - else: - return exc - nearest = find_pickleable_exception(exc) - if nearest: - return nearest - return UnpickleableExceptionWrapper.from_exception(exc) - - -def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): - try: - loads(dumps(cls)) - except: - return Exception - else: - return cls - - -def get_pickled_exception(exc): - """Get original exception from exception pickled using - :meth:`get_pickleable_exception`.""" - if isinstance(exc, UnpickleableExceptionWrapper): - return exc.restore() - return exc diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py deleted file mode 100644 index 65073a6..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import os - -from math import ceil - -from kombu.utils import cached_property - -__all__ = ['load_average', 'df'] - - -if hasattr(os, 'getloadavg'): - - def load_average(): - return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg()) - -else: # pragma: no cover - # Windows doesn't have getloadavg - def load_average(): # noqa - return (0.0, 0.0, 0.0) - - -class df(object): - - def __init__(self, path): - self.path = path - - @property - 
def total_blocks(self): - return self.stat.f_blocks * self.stat.f_frsize / 1024 - - @property - def available(self): - return self.stat.f_bavail * self.stat.f_frsize / 1024 - - @property - def capacity(self): - avail = self.stat.f_bavail - used = self.stat.f_blocks - self.stat.f_bfree - return int(ceil(used * 100.0 / (used + avail) + 0.5)) - - @cached_property - def stat(self): - return os.statvfs(os.path.abspath(self.path)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/term.py b/thesisenv/lib/python3.6/site-packages/celery/utils/term.py deleted file mode 100644 index 430c695..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/term.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.term - ~~~~~~~~~~~~~~~~~ - - Terminals and colors. - -""" -from __future__ import absolute_import, unicode_literals - -import platform - -from functools import reduce - -from kombu.utils.encoding import safe_str -from celery.five import string - -__all__ = ['colored'] - -IS_WINDOWS = platform.system() == 'Windows' - -BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) -OP_SEQ = '\033[%dm' -RESET_SEQ = '\033[0m' -COLOR_SEQ = '\033[1;%dm' - - -def fg(s): - return COLOR_SEQ % s - - -class colored(object): - """Terminal colored text. - - Example:: - >>> c = colored(enabled=True) - >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')), - ... c.magenta(c.underline('jumps over')), - ... c.yellow(' the lazy '), - ... c.green('dog '))) - - """ - - def __init__(self, *s, **kwargs): - self.s = s - self.enabled = not IS_WINDOWS and kwargs.get('enabled', True) - self.op = kwargs.get('op', '') - self.names = {'black': self.black, - 'red': self.red, - 'green': self.green, - 'yellow': self.yellow, - 'blue': self.blue, - 'magenta': self.magenta, - 'cyan': self.cyan, - 'white': self.white} - - def _add(self, a, b): - return string(a) + string(b) - - def _fold_no_color(self, a, b): - try: - A = a.no_color() - except AttributeError: - A = string(a) - try: - B = b.no_color() - except AttributeError: - B = string(b) - - return ''.join((string(A), string(B))) - - def no_color(self): - if self.s: - return string(reduce(self._fold_no_color, self.s)) - return '' - - def embed(self): - prefix = '' - if self.enabled: - prefix = self.op - return ''.join((string(prefix), string(reduce(self._add, self.s)))) - - def __unicode__(self): - suffix = '' - if self.enabled: - suffix = RESET_SEQ - return string(''.join((self.embed(), string(suffix)))) - - def __str__(self): - return safe_str(self.__unicode__()) - - def node(self, s, op): - return self.__class__(enabled=self.enabled, op=op, *s) - - def black(self, *s): - return self.node(s, fg(30 + BLACK)) - - def red(self, *s): - return self.node(s, fg(30 + RED)) - - def green(self, *s): - return self.node(s, fg(30 + GREEN)) - - def yellow(self, *s): - return self.node(s, fg(30 + YELLOW)) - - def blue(self, *s): - return self.node(s, fg(30 + BLUE)) - - def magenta(self, *s): - return self.node(s, fg(30 + MAGENTA)) - - def cyan(self, *s): - return self.node(s, fg(30 + CYAN)) - - def white(self, *s): - return self.node(s, fg(30 + WHITE)) - - def __repr__(self): - return repr(self.no_color()) - - def bold(self, *s): - return self.node(s, OP_SEQ % 1) - - def underline(self, *s): - return self.node(s, OP_SEQ % 4) - - def blink(self, *s): - return self.node(s, OP_SEQ % 5) - - def reverse(self, *s): - return self.node(s, OP_SEQ % 7) - - def bright(self, *s): - return self.node(s, OP_SEQ % 8) - - def ired(self, *s): - 
return self.node(s, fg(40 + RED)) - - def igreen(self, *s): - return self.node(s, fg(40 + GREEN)) - - def iyellow(self, *s): - return self.node(s, fg(40 + YELLOW)) - - def iblue(self, *s): - return self.node(s, fg(40 + BLUE)) - - def imagenta(self, *s): - return self.node(s, fg(40 + MAGENTA)) - - def icyan(self, *s): - return self.node(s, fg(40 + CYAN)) - - def iwhite(self, *s): - return self.node(s, fg(40 + WHITE)) - - def reset(self, *s): - return self.node(s or [''], RESET_SEQ) - - def __add__(self, other): - return string(self) + string(other) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/text.py b/thesisenv/lib/python3.6/site-packages/celery/utils/text.py deleted file mode 100644 index ffd2d72..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/text.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.text - ~~~~~~~~~~~~~~~~~ - - Text formatting utilities - -""" -from __future__ import absolute_import - -from textwrap import fill - -from pprint import pformat - -__all__ = ['dedent_initial', 'dedent', 'fill_paragraphs', 'join', - 'ensure_2lines', 'abbr', 'abbrtask', 'indent', 'truncate', - 'pluralize', 'pretty'] - - -def dedent_initial(s, n=4): - return s[n:] if s[:n] == ' ' * n else s - - -def dedent(s, n=4, sep='\n'): - return sep.join(dedent_initial(l) for l in s.splitlines()) - - -def fill_paragraphs(s, width, sep='\n'): - return sep.join(fill(p, width) for p in s.split(sep)) - - -def join(l, sep='\n'): - return sep.join(v for v in l if v) - - -def ensure_2lines(s, sep='\n'): - if len(s.splitlines()) <= 2: - return s + sep - return s - - -def abbr(S, max, ellipsis='...'): - if S is None: - return '???' - if len(S) > max: - return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max] - return S - - -def abbrtask(S, max): - if S is None: - return '???' - if len(S) > max: - module, _, cls = S.rpartition('.') - module = abbr(module, max - len(cls) - 3, False) - return module + '[.]' + cls - return S - - -def indent(t, indent=0, sep='\n'): - """Indent text.""" - return sep.join(' ' * indent + p for p in t.split(sep)) - - -def truncate(text, maxlen=128, suffix='...'): - """Truncates text to a maximum number of characters.""" - if len(text) >= maxlen: - return text[:maxlen].rsplit(' ', 1)[0] + suffix - return text - - -def pluralize(n, text, suffix='s'): - if n > 1: - return text + suffix - return text - - -def pretty(value, width=80, nl_width=80, sep='\n', **kw): - if isinstance(value, dict): - return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:]) - elif isinstance(value, tuple): - return '{0}{1}{2}'.format( - sep, ' ' * 4, pformat(value, width=nl_width, **kw), - ) - else: - return pformat(value, width=width, **kw) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py b/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py deleted file mode 100644 index 5d42373..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py +++ /dev/null @@ -1,329 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.threads - ~~~~~~~~~~~~~~~~~~~~ - - Threading utilities. 
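The text helpers deleted just above behave like this (a quick sketch):

    from celery.utils.text import abbr, indent, pluralize, truncate

    truncate('The quick brown fox jumps over the lazy dog', 20)
    # -> 'The quick brown fox...'
    abbr('celery.worker.consumer', 12)   # -> 'celery.wo...'
    pluralize(3, 'worker')               # -> 'workers'
    indent('line one\nline two', 4)      # four leading spaces on each line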
- -""" -from __future__ import absolute_import, print_function - -import os -import socket -import sys -import threading -import traceback - -from contextlib import contextmanager - -from celery.local import Proxy -from celery.five import THREAD_TIMEOUT_MAX, items - -__all__ = ['bgThread', 'Local', 'LocalStack', 'LocalManager', - 'get_ident', 'default_socket_timeout'] - -USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') -PY3 = sys.version_info[0] == 3 - - -@contextmanager -def default_socket_timeout(timeout): - prev = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - yield - socket.setdefaulttimeout(prev) - - -class bgThread(threading.Thread): - - def __init__(self, name=None, **kwargs): - super(bgThread, self).__init__() - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.daemon = True - self.name = name or self.__class__.__name__ - - def body(self): - raise NotImplementedError('subclass responsibility') - - def on_crash(self, msg, *fmt, **kwargs): - print(msg.format(*fmt), file=sys.stderr) - exc_info = sys.exc_info() - try: - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, sys.stderr) - finally: - del(exc_info) - - def run(self): - body = self.body - shutdown_set = self._is_shutdown.is_set - try: - while not shutdown_set(): - try: - body() - except Exception as exc: - try: - self.on_crash('{0!r} crashed: {1!r}', self.name, exc) - self._set_stopped() - finally: - os._exit(1) # exiting by normal means won't work - finally: - self._set_stopped() - - def _set_stopped(self): - try: - self._is_stopped.set() - except TypeError: # pragma: no cover - # we lost the race at interpreter shutdown, - # so gc collected built-in modules. - pass - - def stop(self): - """Graceful shutdown.""" - self._is_shutdown.set() - self._is_stopped.wait() - if self.is_alive(): - self.join(THREAD_TIMEOUT_MAX) - -try: - from greenlet import getcurrent as get_ident -except ImportError: # pragma: no cover - try: - from _thread import get_ident # noqa - except ImportError: - try: - from thread import get_ident # noqa - except ImportError: # pragma: no cover - try: - from _dummy_thread import get_ident # noqa - except ImportError: - from dummy_thread import get_ident # noqa - - -def release_local(local): - """Releases the contents of the local for the current context. - This makes it possible to use locals without a manager. - - Example:: - - >>> loc = Local() - >>> loc.foo = 42 - >>> release_local(loc) - >>> hasattr(loc, 'foo') - False - - With this function one can release :class:`Local` objects as well - as :class:`StackLocal` objects. However it is not possible to - release data held by proxies that way, one always has to retain - a reference to the underlying local object in order to be able - to release it. - - .. 
versionadded:: 0.6.1 - """ - local.__release_local__() - - -class Local(object): - __slots__ = ('__storage__', '__ident_func__') - - def __init__(self): - object.__setattr__(self, '__storage__', {}) - object.__setattr__(self, '__ident_func__', get_ident) - - def __iter__(self): - return iter(items(self.__storage__)) - - def __call__(self, proxy): - """Create a proxy for a name.""" - return Proxy(self, proxy) - - def __release_local__(self): - self.__storage__.pop(self.__ident_func__(), None) - - def __getattr__(self, name): - try: - return self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - ident = self.__ident_func__() - storage = self.__storage__ - try: - storage[ident][name] = value - except KeyError: - storage[ident] = {name: value} - - def __delattr__(self, name): - try: - del self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - -class _LocalStack(object): - """This class works similar to a :class:`Local` but keeps a stack - of objects instead. This is best explained with an example:: - - >>> ls = LocalStack() - >>> ls.push(42) - >>> ls.top - 42 - >>> ls.push(23) - >>> ls.top - 23 - >>> ls.pop() - 23 - >>> ls.top - 42 - - They can be force released by using a :class:`LocalManager` or with - the :func:`release_local` function but the correct way is to pop the - item from the stack after using. When the stack is empty it will - no longer be bound to the current context (and as such released). - - By calling the stack without arguments it will return a proxy that - resolves to the topmost item on the stack. - - """ - - def __init__(self): - self._local = Local() - - def __release_local__(self): - self._local.__release_local__() - - def _get__ident_func__(self): - return self._local.__ident_func__ - - def _set__ident_func__(self, value): - object.__setattr__(self._local, '__ident_func__', value) - __ident_func__ = property(_get__ident_func__, _set__ident_func__) - del _get__ident_func__, _set__ident_func__ - - def __call__(self): - def _lookup(): - rv = self.top - if rv is None: - raise RuntimeError('object unbound') - return rv - return Proxy(_lookup) - - def push(self, obj): - """Pushes a new item to the stack""" - rv = getattr(self._local, 'stack', None) - if rv is None: - self._local.stack = rv = [] - rv.append(obj) - return rv - - def pop(self): - """Remove the topmost item from the stack, will return the - old value or `None` if the stack was already empty. - """ - stack = getattr(self._local, 'stack', None) - if stack is None: - return None - elif len(stack) == 1: - release_local(self._local) - return stack[-1] - else: - return stack.pop() - - def __len__(self): - stack = getattr(self._local, 'stack', None) - return len(stack) if stack else 0 - - @property - def stack(self): - """get_current_worker_task uses this to find - the original task that was executed by the worker.""" - stack = getattr(self._local, 'stack', None) - if stack is not None: - return stack - return [] - - @property - def top(self): - """The topmost item on the stack. If the stack is empty, - `None` is returned. - """ - try: - return self._local.stack[-1] - except (AttributeError, IndexError): - return None - - -class LocalManager(object): - """Local objects cannot manage themselves. For that you need a local - manager. You can pass a local manager multiple locals or add them - later by appending them to `manager.locals`. 
Everytime the manager - cleans up it, will clean up all the data left in the locals for this - context. - - The `ident_func` parameter can be added to override the default ident - function for the wrapped locals. - - """ - - def __init__(self, locals=None, ident_func=None): - if locals is None: - self.locals = [] - elif isinstance(locals, Local): - self.locals = [locals] - else: - self.locals = list(locals) - if ident_func is not None: - self.ident_func = ident_func - for local in self.locals: - object.__setattr__(local, '__ident_func__', ident_func) - else: - self.ident_func = get_ident - - def get_ident(self): - """Return the context identifier the local objects use internally - for this context. You cannot override this method to change the - behavior but use it to link other context local objects (such as - SQLAlchemy's scoped sessions) to the Werkzeug locals.""" - return self.ident_func() - - def cleanup(self): - """Manually clean up the data in the locals for this context. - - Call this at the end of the request or use `make_middleware()`. - - """ - for local in self.locals: - release_local(local) - - def __repr__(self): - return '<{0} storages: {1}>'.format( - self.__class__.__name__, len(self.locals)) - - -class _FastLocalStack(threading.local): - - def __init__(self): - self.stack = [] - self.push = self.stack.append - self.pop = self.stack.pop - - @property - def top(self): - try: - return self.stack[-1] - except (AttributeError, IndexError): - return None - - def __len__(self): - return len(self.stack) - -if USE_FAST_LOCALS: # pragma: no cover - LocalStack = _FastLocalStack -else: - # - See #706 - # since each thread has its own greenlet we can just use those as - # identifiers for the context. If greenlets are not available we - # fall back to the current thread ident. - LocalStack = _LocalStack # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py deleted file mode 100644 index e42660c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -""" - timer2 - ~~~~~~ - - Scheduler for Python functions. 
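Looking back at the threads module above, `Local` and `LocalManager` pair up like this (a sketch; the attribute name is illustrative):

    from celery.utils.threads import Local, LocalManager

    state = Local()
    manager = LocalManager([state])

    state.request_id = 'abc123'   # visible only in this thread/greenlet
    print(state.request_id)
    manager.cleanup()             # releases everything for this context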
- -""" -from __future__ import absolute_import - -import os -import sys -import threading - -from itertools import count -from time import sleep - -from celery.five import THREAD_TIMEOUT_MAX -from kombu.async.timer import Entry, Timer as Schedule, to_timestamp, logger - -TIMER_DEBUG = os.environ.get('TIMER_DEBUG') - -__all__ = ['Entry', 'Schedule', 'Timer', 'to_timestamp'] - - -class Timer(threading.Thread): - Entry = Entry - Schedule = Schedule - - running = False - on_tick = None - _timer_count = count(1) - - if TIMER_DEBUG: # pragma: no cover - def start(self, *args, **kwargs): - import traceback - print('- Timer starting') - traceback.print_stack() - super(Timer, self).start(*args, **kwargs) - - def __init__(self, schedule=None, on_error=None, on_tick=None, - on_start=None, max_interval=None, **kwargs): - self.schedule = schedule or self.Schedule(on_error=on_error, - max_interval=max_interval) - self.on_start = on_start - self.on_tick = on_tick or self.on_tick - threading.Thread.__init__(self) - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.mutex = threading.Lock() - self.not_empty = threading.Condition(self.mutex) - self.daemon = True - self.name = 'Timer-{0}'.format(next(self._timer_count)) - - def _next_entry(self): - with self.not_empty: - delay, entry = next(self.scheduler) - if entry is None: - if delay is None: - self.not_empty.wait(1.0) - return delay - return self.schedule.apply_entry(entry) - __next__ = next = _next_entry # for 2to3 - - def run(self): - try: - self.running = True - self.scheduler = iter(self.schedule) - - while not self._is_shutdown.isSet(): - delay = self._next_entry() - if delay: - if self.on_tick: - self.on_tick(delay) - if sleep is None: # pragma: no cover - break - sleep(delay) - try: - self._is_stopped.set() - except TypeError: # pragma: no cover - # we lost the race at interpreter shutdown, - # so gc collected built-in modules. 
- pass - except Exception as exc: - logger.error('Thread Timer crashed: %r', exc, exc_info=True) - os._exit(1) - - def stop(self): - self._is_shutdown.set() - if self.running: - self._is_stopped.wait() - self.join(THREAD_TIMEOUT_MAX) - self.running = False - - def ensure_started(self): - if not self.running and not self.isAlive(): - if self.on_start: - self.on_start(self) - self.start() - - def _do_enter(self, meth, *args, **kwargs): - self.ensure_started() - with self.mutex: - entry = getattr(self.schedule, meth)(*args, **kwargs) - self.not_empty.notify() - return entry - - def enter(self, entry, eta, priority=None): - return self._do_enter('enter_at', entry, eta, priority=priority) - - def call_at(self, *args, **kwargs): - return self._do_enter('call_at', *args, **kwargs) - - def enter_after(self, *args, **kwargs): - return self._do_enter('enter_after', *args, **kwargs) - - def call_after(self, *args, **kwargs): - return self._do_enter('call_after', *args, **kwargs) - - def call_repeatedly(self, *args, **kwargs): - return self._do_enter('call_repeatedly', *args, **kwargs) - - def exit_after(self, secs, priority=10): - self.call_after(secs, sys.exit, priority) - - def cancel(self, tref): - tref.cancel() - - def clear(self): - self.schedule.clear() - - def empty(self): - return not len(self) - - def __len__(self): - return len(self.schedule) - - def __bool__(self): - return True - __nonzero__ = __bool__ - - @property - def queue(self): - return self.schedule.queue diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py deleted file mode 100644 index 6dab703..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.timeutils - ~~~~~~~~~~~~~~~~~~~~~~ - - This module contains various utilities related to dates and times. - -""" -from __future__ import absolute_import - -import numbers -import os -import sys -import time as _time - -from calendar import monthrange -from datetime import date, datetime, timedelta, tzinfo - -from kombu.utils import cached_property, reprcall -from kombu.utils.compat import timedelta_seconds - -from pytz import timezone as _timezone, AmbiguousTimeError, FixedOffset - -from celery.five import string_t - -from .functional import dictfilter -from .iso8601 import parse_iso8601 -from .text import pluralize - -__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta', 'timedelta_seconds', - 'delta_resolution', 'remaining', 'rate', 'weekday', - 'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware', - 'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset', - 'adjust_timestamp', 'maybe_s_to_ms'] - -PY3 = sys.version_info[0] == 3 -PY33 = sys.version_info >= (3, 3) - -C_REMDEBUG = os.environ.get('C_REMDEBUG', False) - -DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' -WEEKDAYS = dict(zip(DAYNAMES, range(7))) - -RATE_MODIFIER_MAP = {'s': lambda n: n, - 'm': lambda n: n / 60.0, - 'h': lambda n: n / 60.0 / 60.0} - -TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')), - ('hour', 60 * 60.0, lambda n: format(n, '.2f')), - ('minute', 60.0, lambda n: format(n, '.2f')), - ('second', 1.0, lambda n: format(n, '.2f'))) - -ZERO = timedelta(0) - -_local_timezone = None - - -class LocalTimezone(tzinfo): - """Local time implementation taken from Python's docs. - - Used only when UTC is not enabled. 
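- - Illustrative sketch (not part of the original docstring; the reported offset depends on the host's zone):: - - tz = LocalTimezone() - dt = datetime(2018, 6, 1, 12, 0) - tz.utcoffset(dt) # DSTOFFSET or STDOFFSET, as a timedelta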
- """ - _offset_cache = {} - - def __init__(self): - # This code is moved in __init__ to execute it as late as possible - # See get_default_timezone(). - self.STDOFFSET = timedelta(seconds=-_time.timezone) - if _time.daylight: - self.DSTOFFSET = timedelta(seconds=-_time.altzone) - else: - self.DSTOFFSET = self.STDOFFSET - self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET - tzinfo.__init__(self) - - def __repr__(self): - return ''.format( - int(timedelta_seconds(self.DSTOFFSET) / 3600), - ) - - def utcoffset(self, dt): - return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET - - def dst(self, dt): - return self.DSTDIFF if self._isdst(dt) else ZERO - - def tzname(self, dt): - return _time.tzname[self._isdst(dt)] - - if PY3: - - def fromutc(self, dt): - # The base tzinfo class no longer implements a DST - # offset aware .fromutc() in Python3 (Issue #2306). - - # I'd rather rely on pytz to do this, than port - # the C code from cpython's fromutc [asksol] - offset = int(self.utcoffset(dt).seconds / 60.0) - try: - tz = self._offset_cache[offset] - except KeyError: - tz = self._offset_cache[offset] = FixedOffset(offset) - return tz.fromutc(dt.replace(tzinfo=tz)) - - def _isdst(self, dt): - tt = (dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, - dt.weekday(), 0, 0) - stamp = _time.mktime(tt) - tt = _time.localtime(stamp) - return tt.tm_isdst > 0 - - -class _Zone(object): - - def tz_or_local(self, tzinfo=None): - if tzinfo is None: - return self.local - return self.get_timezone(tzinfo) - - def to_local(self, dt, local=None, orig=None): - if is_naive(dt): - dt = make_aware(dt, orig or self.utc) - return localize(dt, self.tz_or_local(local)) - - if PY33: - - def to_system(self, dt): - # tz=None is a special case since Python 3.3, and will - # convert to the current local timezone (Issue #2306). - return dt.astimezone(tz=None) - - else: - - def to_system(self, dt): # noqa - return localize(dt, self.local) - - def to_local_fallback(self, dt): - if is_naive(dt): - return make_aware(dt, self.local) - return localize(dt, self.local) - - def get_timezone(self, zone): - if isinstance(zone, string_t): - return _timezone(zone) - return zone - - @cached_property - def local(self): - return LocalTimezone() - - @cached_property - def utc(self): - return self.get_timezone('UTC') -timezone = _Zone() - - -def maybe_timedelta(delta): - """Coerces integer to timedelta if `delta` is an integer.""" - if isinstance(delta, numbers.Real): - return timedelta(seconds=delta) - return delta - - -def delta_resolution(dt, delta): - """Round a datetime to the resolution of a timedelta. - - If the timedelta is in days, the datetime will be rounded - to the nearest days, if the timedelta is in hours the datetime - will be rounded to the nearest hour, and so on until seconds - which will just return the original datetime. - - """ - delta = timedelta_seconds(delta) - - resolutions = ((3, lambda x: x / 86400), - (4, lambda x: x / 3600), - (5, lambda x: x / 60)) - - args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second - for res, predicate in resolutions: - if predicate(delta) >= 1.0: - return datetime(*args[:res], tzinfo=dt.tzinfo) - return dt - - -def remaining(start, ends_in, now=None, relative=False): - """Calculate the remaining time for a start date and a timedelta. - - e.g. "how many seconds left for 30 seconds after start?" - - :param start: Start :class:`~datetime.datetime`. - :param ends_in: The end delta as a :class:`~datetime.timedelta`. 
- :keyword relative: If enabled the end time will be - calculated using :func:`delta_resolution` (i.e. rounded to the - resolution of `ends_in`). - :keyword now: Function returning the current time and date, - defaults to :func:`datetime.utcnow`. - - """ - now = now or datetime.utcnow() - end_date = start + ends_in - if relative: - end_date = delta_resolution(end_date, ends_in) - ret = end_date - now - if C_REMDEBUG: # pragma: no cover - print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % ( - now, start, ends_in, end_date, ret)) - return ret - - -def rate(rate): - """Parse rate strings, such as `"100/m"`, `"2/h"` or `"0.5/s"` - and convert them to operations per second.""" - if rate: - if isinstance(rate, string_t): - ops, _, modifier = rate.partition('/') - return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0 - return rate or 0 - return 0 - - -def weekday(name): - """Return the position of a weekday (0 - 6, where 0 is Sunday). - - Example:: - - >>> weekday('sunday'), weekday('sun'), weekday('mon') - (0, 0, 1) - - """ - abbreviation = name[0:3].lower() - try: - return WEEKDAYS[abbreviation] - except KeyError: - # Show original day name in exception, instead of abbr. - raise KeyError(name) - - -def humanize_seconds(secs, prefix='', sep='', now='now'): - """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2 - hours". - - :keyword prefix: Can be used to add a preposition to the output, - e.g. 'in' will give 'in 1 second', but add nothing to 'now'. - - """ - secs = float(secs) - for unit, divider, formatter in TIME_UNITS: - if secs >= divider: - w = secs / divider - return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w), - pluralize(w, unit)) - return now - - -def maybe_iso8601(dt): - """`Either datetime | str -> datetime or None -> None`""" - if not dt: - return - if isinstance(dt, datetime): - return dt - return parse_iso8601(dt) - - -def is_naive(dt): - """Return :const:`True` if the datetime is naive - (does not have timezone information).""" - return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None - - -def make_aware(dt, tz): - """Set the timezone for a datetime object.""" - try: - _localize = tz.localize - except AttributeError: - return dt.replace(tzinfo=tz) - else: - # works on pytz timezones - try: - return _localize(dt, is_dst=None) - except AmbiguousTimeError: - return min(_localize(dt, is_dst=True), - _localize(dt, is_dst=False)) - - -def localize(dt, tz): - """Convert aware datetime to another timezone.""" - dt = dt.astimezone(tz) - try: - _normalize = tz.normalize - except AttributeError: # non-pytz tz - return dt - else: - try: - return _normalize(dt, is_dst=None) - except TypeError: - return _normalize(dt) - except AmbiguousTimeError: - return min(_normalize(dt, is_dst=True), - _normalize(dt, is_dst=False)) - - -def to_utc(dt): - """Convert a naive datetime to UTC.""" - return make_aware(dt, timezone.utc) - - -def maybe_make_aware(dt, tz=None): - if is_naive(dt): - dt = to_utc(dt) - return localize( - dt, timezone.utc if tz is None else timezone.tz_or_local(tz), - ) - - -class ffwd(object): - """Version of relativedelta that only supports addition.""" - - def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, - hour=None, minute=None, second=None, microsecond=None, - **kwargs): - self.year = year - self.month = month - self.weeks = weeks - self.weekday = weekday - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - self.days = weeks * 7 - self._has_time = self.hour is not None or
self.minute is not None - - def __repr__(self): - return reprcall('ffwd', (), self._fields(weeks=self.weeks, - weekday=self.weekday)) - - def __radd__(self, other): - if not isinstance(other, date): - return NotImplemented - year = self.year or other.year - month = self.month or other.month - day = min(monthrange(year, month)[1], self.day or other.day) - ret = other.replace(**dict(dictfilter(self._fields()), - year=year, month=month, day=day)) - if self.weekday is not None: - ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) - return ret + timedelta(days=self.days) - - def _fields(self, **extra): - return dictfilter({ - 'year': self.year, 'month': self.month, 'day': self.day, - 'hour': self.hour, 'minute': self.minute, - 'second': self.second, 'microsecond': self.microsecond, - }, **extra) - - -def utcoffset(time=_time, localtime=_time.localtime): - if localtime().tm_isdst: - return time.altzone // 3600 - return time.timezone // 3600 - - -def adjust_timestamp(ts, offset, here=utcoffset): - return ts - (offset - here()) * 3600 - - -def maybe_s_to_ms(v): - return int(float(v) * 1000.0) if v is not None else v diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py deleted file mode 100644 index 3d65dd1..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker - ~~~~~~~~~~~~~ - - :class:`WorkController` can be used to instantiate in-process workers. - - The worker consists of several components, all managed by bootsteps - (:mod:`celery.bootsteps`). - -""" -from __future__ import absolute_import - -import os -import sys -import traceback -try: - import resource -except ImportError: # pragma: no cover - resource = None # noqa - -from billiard import cpu_count -from billiard.util import Finalize -from kombu.syn import detect_environment - -from celery import bootsteps -from celery.bootsteps import RUN, TERMINATE -from celery import concurrency as _concurrency -from celery import platforms -from celery import signals -from celery.exceptions import ( - ImproperlyConfigured, WorkerTerminate, TaskRevokedError, -) -from celery.five import string_t, values -from celery.utils import default_nodename, worker_direct -from celery.utils.imports import reload_from_cwd -from celery.utils.log import mlevel, worker_logger as logger -from celery.utils.threads import default_socket_timeout - -from . import state - -__all__ = ['WorkController', 'default_nodename'] - -#: Default socket timeout at shutdown. -SHUTDOWN_SOCKET_TIMEOUT = 5.0 - -SELECT_UNKNOWN_QUEUE = """\ -Trying to select queue subset of {0!r}, but queue {1} is not -defined in the CELERY_QUEUES setting. - -If you want to automatically declare unknown queues you can -enable the CELERY_CREATE_MISSING_QUEUES setting. -""" - -DESELECT_UNKNOWN_QUEUE = """\ -Trying to deselect queue subset of {0!r}, but queue {1} is not -defined in the CELERY_QUEUES setting.
-""" - - -def str_to_list(s): - if isinstance(s, string_t): - return s.split(',') - return s - - -class WorkController(object): - """Unmanaged worker instance.""" - app = None - - pidlock = None - blueprint = None - pool = None - semaphore = None - - class Blueprint(bootsteps.Blueprint): - """Worker bootstep blueprint.""" - name = 'Worker' - default_steps = set([ - 'celery.worker.components:Hub', - 'celery.worker.components:Queues', - 'celery.worker.components:Pool', - 'celery.worker.components:Beat', - 'celery.worker.components:Timer', - 'celery.worker.components:StateDB', - 'celery.worker.components:Consumer', - 'celery.worker.autoscale:WorkerComponent', - 'celery.worker.autoreload:WorkerComponent', - - ]) - - def __init__(self, app=None, hostname=None, **kwargs): - self.app = app or self.app - self.hostname = default_nodename(hostname) - self.app.loader.init_worker() - self.on_before_init(**kwargs) - self.setup_defaults(**kwargs) - self.on_after_init(**kwargs) - - self.setup_instance(**self.prepare_args(**kwargs)) - self._finalize = [ - Finalize(self, self._send_worker_shutdown, exitpriority=10), - ] - - def setup_instance(self, queues=None, ready_callback=None, pidfile=None, - include=None, use_eventloop=None, exclude_queues=None, - **kwargs): - self.pidfile = pidfile - self.setup_queues(queues, exclude_queues) - self.setup_includes(str_to_list(include)) - - # Set default concurrency - if not self.concurrency: - try: - self.concurrency = cpu_count() - except NotImplementedError: - self.concurrency = 2 - - # Options - self.loglevel = mlevel(self.loglevel) - self.ready_callback = ready_callback or self.on_consumer_ready - - # this connection is not established, only used for params - self._conninfo = self.app.connection() - self.use_eventloop = ( - self.should_use_eventloop() if use_eventloop is None - else use_eventloop - ) - self.options = kwargs - - signals.worker_init.send(sender=self) - - # Initialize bootsteps - self.pool_cls = _concurrency.get_implementation(self.pool_cls) - self.steps = [] - self.on_init_blueprint() - self.blueprint = self.Blueprint(app=self.app, - on_start=self.on_start, - on_close=self.on_close, - on_stopped=self.on_stopped) - self.blueprint.apply(self, **kwargs) - - def on_init_blueprint(self): - pass - - def on_before_init(self, **kwargs): - pass - - def on_after_init(self, **kwargs): - pass - - def on_start(self): - if self.pidfile: - self.pidlock = platforms.create_pidlock(self.pidfile) - - def on_consumer_ready(self, consumer): - pass - - def on_close(self): - self.app.loader.shutdown_worker() - - def on_stopped(self): - self.timer.stop() - self.consumer.shutdown() - - if self.pidlock: - self.pidlock.release() - - def setup_queues(self, include, exclude=None): - include = str_to_list(include) - exclude = str_to_list(exclude) - try: - self.app.amqp.queues.select(include) - except KeyError as exc: - raise ImproperlyConfigured( - SELECT_UNKNOWN_QUEUE.format(include, exc)) - try: - self.app.amqp.queues.deselect(exclude) - except KeyError as exc: - raise ImproperlyConfigured( - DESELECT_UNKNOWN_QUEUE.format(exclude, exc)) - if self.app.conf.CELERY_WORKER_DIRECT: - self.app.amqp.queues.select_add(worker_direct(self.hostname)) - - def setup_includes(self, includes): - # Update celery_include to have all known task modules, so that we - # ensure all task modules are imported in case an execv happens. 
- prev = tuple(self.app.conf.CELERY_INCLUDE) - if includes: - prev += tuple(includes) - [self.app.loader.import_task_module(m) for m in includes] - self.include = includes - task_modules = set(task.__class__.__module__ - for task in values(self.app.tasks)) - self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules) - - def prepare_args(self, **kwargs): - return kwargs - - def _send_worker_shutdown(self): - signals.worker_shutdown.send(sender=self) - - def start(self): - """Start the worker's main loop.""" - try: - self.blueprint.start(self) - except WorkerTerminate: - self.terminate() - except Exception as exc: - logger.error('Unrecoverable error: %r', exc, exc_info=True) - self.stop() - except (KeyboardInterrupt, SystemExit): - self.stop() - - def register_with_event_loop(self, hub): - self.blueprint.send_all( - self, 'register_with_event_loop', args=(hub, ), - description='hub.register', - ) - - def _process_task_sem(self, req): - return self._quick_acquire(self._process_task, req) - - def _process_task(self, req): - """Process task by sending it to the pool of workers.""" - try: - req.execute_using_pool(self.pool) - except TaskRevokedError: - try: - self._quick_release() # Issue 877 - except AttributeError: - pass - except Exception as exc: - logger.critical('Internal error: %r\n%s', - exc, traceback.format_exc(), exc_info=True) - - def signal_consumer_close(self): - try: - self.consumer.close() - except AttributeError: - pass - - def should_use_eventloop(self): - return (detect_environment() == 'default' and - self._conninfo.is_evented and not self.app.IS_WINDOWS) - - def stop(self, in_sighandler=False): - """Graceful shutdown of the worker server.""" - if self.blueprint.state == RUN: - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=True) - - def terminate(self, in_sighandler=False): - """Not-so-graceful shutdown of the worker server.""" - if self.blueprint.state != TERMINATE: - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=False) - - def _shutdown(self, warm=True): - # if blueprint does not exist it means that we had an - # error before the bootsteps could be initialized.
- if self.blueprint is not None: - with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT): # Issue 975 - self.blueprint.stop(self, terminate=not warm) - self.blueprint.join() - - def reload(self, modules=None, reload=False, reloader=None): - modules = self.app.loader.task_modules if modules is None else modules - imp = self.app.loader.import_from_cwd - - for module in set(modules or ()): - if module not in sys.modules: - logger.debug('importing module %s', module) - imp(module) - elif reload: - logger.debug('reloading module %s', module) - reload_from_cwd(sys.modules[module], reloader) - - if self.consumer: - self.consumer.update_strategies() - self.consumer.reset_rate_limits() - try: - self.pool.restart() - except NotImplementedError: - pass - - def info(self): - return {'total': self.state.total_count, - 'pid': os.getpid(), - 'clock': str(self.app.clock)} - - def rusage(self): - if resource is None: - raise NotImplementedError('rusage not supported by this platform') - s = resource.getrusage(resource.RUSAGE_SELF) - return { - 'utime': s.ru_utime, - 'stime': s.ru_stime, - 'maxrss': s.ru_maxrss, - 'ixrss': s.ru_ixrss, - 'idrss': s.ru_idrss, - 'isrss': s.ru_isrss, - 'minflt': s.ru_minflt, - 'majflt': s.ru_majflt, - 'nswap': s.ru_nswap, - 'inblock': s.ru_inblock, - 'oublock': s.ru_oublock, - 'msgsnd': s.ru_msgsnd, - 'msgrcv': s.ru_msgrcv, - 'nsignals': s.ru_nsignals, - 'nvcsw': s.ru_nvcsw, - 'nivcsw': s.ru_nivcsw, - } - - def stats(self): - info = self.info() - info.update(self.blueprint.info(self)) - info.update(self.consumer.blueprint.info(self.consumer)) - try: - info['rusage'] = self.rusage() - except NotImplementedError: - info['rusage'] = 'N/A' - return info - - def __repr__(self): - return '<Worker: {self.hostname} ({state})>'.format( - self=self, - state=(self.blueprint.human_state() - if self.blueprint else 'initializing'), # Issue #2514 - ) - - def __str__(self): - return self.hostname - - @property - def state(self): - return state - - def setup_defaults(self, concurrency=None, loglevel=None, logfile=None, - send_events=None, pool_cls=None, consumer_cls=None, - timer_cls=None, timer_precision=None, - autoscaler_cls=None, autoreloader_cls=None, - pool_putlocks=None, pool_restarts=None, - force_execv=None, state_db=None, - schedule_filename=None, scheduler_cls=None, - task_time_limit=None, task_soft_time_limit=None, - max_tasks_per_child=None, prefetch_multiplier=None, - disable_rate_limits=None, worker_lost_wait=None, **_kw): - self.concurrency = self._getopt('concurrency', concurrency) - self.loglevel = self._getopt('log_level', loglevel) - self.logfile = self._getopt('log_file', logfile) - self.send_events = self._getopt('send_events', send_events) - self.pool_cls = self._getopt('pool', pool_cls) - self.consumer_cls = self._getopt('consumer', consumer_cls) - self.timer_cls = self._getopt('timer', timer_cls) - self.timer_precision = self._getopt('timer_precision', timer_precision) - self.autoscaler_cls = self._getopt('autoscaler', autoscaler_cls) - self.autoreloader_cls = self._getopt('autoreloader', autoreloader_cls) - self.pool_putlocks = self._getopt('pool_putlocks', pool_putlocks) - self.pool_restarts = self._getopt('pool_restarts', pool_restarts) - self.force_execv = self._getopt('force_execv', force_execv) - self.state_db = self._getopt('state_db', state_db) - self.schedule_filename = self._getopt( - 'schedule_filename', schedule_filename, - ) - self.scheduler_cls = self._getopt( - 'celerybeat_scheduler', scheduler_cls, - ) - self.task_time_limit = self._getopt( - 'task_time_limit', task_time_limit, - ) -
self.task_soft_time_limit = self._getopt( - 'task_soft_time_limit', task_soft_time_limit, - ) - self.max_tasks_per_child = self._getopt( - 'max_tasks_per_child', max_tasks_per_child, - ) - self.prefetch_multiplier = int(self._getopt( - 'prefetch_multiplier', prefetch_multiplier, - )) - self.disable_rate_limits = self._getopt( - 'disable_rate_limits', disable_rate_limits, - ) - self.worker_lost_wait = self._getopt( - 'worker_lost_wait', worker_lost_wait, - ) - - def _getopt(self, key, value): - if value is not None: - return value - return self.app.conf.find_value_for_key(key, namespace='celeryd') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py deleted file mode 100644 index 8ade32f..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.autoreload - ~~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements automatic module reloading -""" -from __future__ import absolute_import - -import hashlib -import os -import select -import sys -import time - -from collections import defaultdict -from threading import Event - -from kombu.utils import eventio -from kombu.utils.encoding import ensure_bytes - -from celery import bootsteps -from celery.five import items -from celery.platforms import ignore_errno -from celery.utils.imports import module_file -from celery.utils.log import get_logger -from celery.utils.threads import bgThread - -from .components import Pool - -try: # pragma: no cover - import pyinotify - _ProcessEvent = pyinotify.ProcessEvent -except ImportError: # pragma: no cover - pyinotify = None # noqa - _ProcessEvent = object # noqa - -__all__ = [ - 'WorkerComponent', 'Autoreloader', 'Monitor', 'BaseMonitor', - 'StatMonitor', 'KQueueMonitor', 'InotifyMonitor', 'file_hash', -] - -logger = get_logger(__name__) - - -class WorkerComponent(bootsteps.StartStopStep): - label = 'Autoreloader' - conditional = True - requires = (Pool, ) - - def __init__(self, w, autoreload=None, **kwargs): - self.enabled = w.autoreload = autoreload - w.autoreloader = None - - def create(self, w): - w.autoreloader = self.instantiate(w.autoreloader_cls, w) - return w.autoreloader if not w.use_eventloop else None - - def register_with_event_loop(self, w, hub): - w.autoreloader.register_with_event_loop(hub) - hub.on_close.add(w.autoreloader.on_event_loop_close) - - -def file_hash(filename, algorithm='md5'): - hobj = hashlib.new(algorithm) - with open(filename, 'rb') as f: - for chunk in iter(lambda: f.read(2 ** 20), ''): - hobj.update(ensure_bytes(chunk)) - return hobj.digest() - - -class BaseMonitor(object): - - def __init__(self, files, - on_change=None, shutdown_event=None, interval=0.5): - self.files = files - self.interval = interval - self._on_change = on_change - self.modify_times = defaultdict(int) - self.shutdown_event = shutdown_event or Event() - - def start(self): - raise NotImplementedError('Subclass responsibility') - - def stop(self): - pass - - def on_change(self, modified): - if self._on_change: - return self._on_change(modified) - - def on_event_loop_close(self, hub): - pass - - -class StatMonitor(BaseMonitor): - """File change monitor based on the ``stat`` system call.""" - - def _mtimes(self): - return ((f, self._mtime(f)) for f in self.files) - - def _maybe_modified(self, f, mt): - return mt is not None and self.modify_times[f] != mt - - def register_with_event_loop(self, hub): - hub.call_repeatedly(2.0, 
self.find_changes) - - def find_changes(self): - maybe_modified = self._maybe_modified - modified = dict((f, mt) for f, mt in self._mtimes() - if maybe_modified(f, mt)) - if modified: - self.on_change(modified) - self.modify_times.update(modified) - - def start(self): - while not self.shutdown_event.is_set(): - self.find_changes() - time.sleep(self.interval) - - @staticmethod - def _mtime(path): - try: - return os.stat(path).st_mtime - except Exception: - pass - - -class KQueueMonitor(BaseMonitor): - """File change monitor based on BSD kernel event notifications""" - - def __init__(self, *args, **kwargs): - super(KQueueMonitor, self).__init__(*args, **kwargs) - self.filemap = dict((f, None) for f in self.files) - self.fdmap = {} - - def register_with_event_loop(self, hub): - if eventio.kqueue is not None: - self._kq = eventio._kqueue() - self.add_events(self._kq) - self._kq.on_file_change = self.handle_event - hub.add_reader(self._kq._kqueue, self._kq.poll, 0) - - def on_event_loop_close(self, hub): - self.close(self._kq) - - def add_events(self, poller): - for f in self.filemap: - self.filemap[f] = fd = os.open(f, os.O_RDONLY) - self.fdmap[fd] = f - poller.watch_file(fd) - - def handle_event(self, events): - self.on_change([self.fdmap[e.ident] for e in events]) - - def start(self): - self.poller = eventio.poll() - self.add_events(self.poller) - self.poller.on_file_change = self.handle_event - while not self.shutdown_event.is_set(): - self.poller.poll(1) - - def close(self, poller): - for f, fd in items(self.filemap): - if fd is not None: - poller.unregister(fd) - with ignore_errno('EBADF'): # pragma: no cover - os.close(fd) - self.filemap.clear() - self.fdmap.clear() - - def stop(self): - self.close(self.poller) - self.poller.close() - - -class InotifyMonitor(_ProcessEvent): - """File change monitor based on Linux kernel `inotify` subsystem""" - - def __init__(self, modules, on_change=None, **kwargs): - assert pyinotify - self._modules = modules - self._on_change = on_change - self._wm = None - self._notifier = None - - def register_with_event_loop(self, hub): - self.create_notifier() - hub.add_reader(self._wm.get_fd(), self.on_readable) - - def on_event_loop_close(self, hub): - pass - - def on_readable(self): - self._notifier.read_events() - self._notifier.process_events() - - def create_notifier(self): - self._wm = pyinotify.WatchManager() - self._notifier = pyinotify.Notifier(self._wm, self) - add_watch = self._wm.add_watch - flags = pyinotify.IN_MODIFY | pyinotify.IN_ATTRIB - for m in self._modules: - add_watch(m, flags) - - def start(self): - try: - self.create_notifier() - self._notifier.loop() - finally: - if self._wm: - self._wm.close() - # Notifier.close is called at the end of Notifier.loop - self._wm = self._notifier = None - - def stop(self): - pass - - def process_(self, event): - self.on_change([event.path]) - - process_IN_ATTRIB = process_IN_MODIFY = process_ - - def on_change(self, modified): - if self._on_change: - return self._on_change(modified) - - -def default_implementation(): - if hasattr(select, 'kqueue') and eventio.kqueue is not None: - return 'kqueue' - elif sys.platform.startswith('linux') and pyinotify: - return 'inotify' - else: - return 'stat' - -implementations = {'kqueue': KQueueMonitor, - 'inotify': InotifyMonitor, - 'stat': StatMonitor} -Monitor = implementations[ - os.environ.get('CELERYD_FSNOTIFY') or default_implementation()] - - -class Autoreloader(bgThread): - """Tracks changes in modules and fires reload commands""" - Monitor = Monitor - - def 
__init__(self, controller, modules=None, monitor_cls=None, **options): - super(Autoreloader, self).__init__() - self.controller = controller - app = self.controller.app - self.modules = app.loader.task_modules if modules is None else modules - self.options = options - self._monitor = None - self._hashes = None - self.file_to_module = {} - - def on_init(self): - files = self.file_to_module - files.update(dict( - (module_file(sys.modules[m]), m) for m in self.modules)) - - self._monitor = self.Monitor( - files, self.on_change, - shutdown_event=self._is_shutdown, **self.options) - self._hashes = dict([(f, file_hash(f)) for f in files]) - - def register_with_event_loop(self, hub): - if self._monitor is None: - self.on_init() - self._monitor.register_with_event_loop(hub) - - def on_event_loop_close(self, hub): - if self._monitor is not None: - self._monitor.on_event_loop_close(hub) - - def body(self): - self.on_init() - with ignore_errno('EINTR', 'EAGAIN'): - self._monitor.start() - - def _maybe_modified(self, f): - if os.path.exists(f): - digest = file_hash(f) - if digest != self._hashes[f]: - self._hashes[f] = digest - return True - return False - - def on_change(self, files): - modified = [f for f in files if self._maybe_modified(f)] - if modified: - names = [self.file_to_module[module] for module in modified] - logger.info('Detected modified modules: %r', names) - self._reload(names) - - def _reload(self, modules): - self.controller.reload(modules, reload=True) - - def stop(self): - if self._monitor: - self._monitor.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py deleted file mode 100644 index 265feda..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py +++ /dev/null @@ -1,162 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.autoscale - ~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements the internal thread responsible - for growing and shrinking the pool according to the - current autoscale settings. - - The autoscale thread is only enabled if :option:`--autoscale` - has been enabled on the command-line. - -""" -from __future__ import absolute_import - -import os -import threading - -from time import sleep - -from kombu.async.semaphore import DummyLock - -from celery import bootsteps -from celery.five import monotonic -from celery.utils.log import get_logger -from celery.utils.threads import bgThread - -from . 
import state -from .components import Pool - -__all__ = ['Autoscaler', 'WorkerComponent'] - -logger = get_logger(__name__) -debug, info, error = logger.debug, logger.info, logger.error - -AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) - - -class WorkerComponent(bootsteps.StartStopStep): - label = 'Autoscaler' - conditional = True - requires = (Pool, ) - - def __init__(self, w, **kwargs): - self.enabled = w.autoscale - w.autoscaler = None - - def create(self, w): - scaler = w.autoscaler = self.instantiate( - w.autoscaler_cls, - w.pool, w.max_concurrency, w.min_concurrency, - worker=w, mutex=DummyLock() if w.use_eventloop else None, - ) - return scaler if not w.use_eventloop else None - - def register_with_event_loop(self, w, hub): - w.consumer.on_task_message.add(w.autoscaler.maybe_scale) - hub.call_repeatedly( - w.autoscaler.keepalive, w.autoscaler.maybe_scale, - ) - - -class Autoscaler(bgThread): - - def __init__(self, pool, max_concurrency, - min_concurrency=0, worker=None, - keepalive=AUTOSCALE_KEEPALIVE, mutex=None): - super(Autoscaler, self).__init__() - self.pool = pool - self.mutex = mutex or threading.Lock() - self.max_concurrency = max_concurrency - self.min_concurrency = min_concurrency - self.keepalive = keepalive - self._last_action = None - self.worker = worker - - assert self.keepalive, 'cannot scale down too fast.' - - def body(self): - with self.mutex: - self.maybe_scale() - sleep(1.0) - - def _maybe_scale(self, req=None): - procs = self.processes - cur = min(self.qty, self.max_concurrency) - if cur > procs: - self.scale_up(cur - procs) - return True - elif cur < procs: - self.scale_down((procs - cur) - self.min_concurrency) - return True - - def maybe_scale(self, req=None): - if self._maybe_scale(req): - self.pool.maintain_pool() - - def update(self, max=None, min=None): - with self.mutex: - if max is not None: - if max < self.max_concurrency: - self._shrink(self.processes - max) - self.max_concurrency = max - if min is not None: - if min > self.min_concurrency: - self._grow(min - self.min_concurrency) - self.min_concurrency = min - return self.max_concurrency, self.min_concurrency - - def force_scale_up(self, n): - with self.mutex: - new = self.processes + n - if new > self.max_concurrency: - self.max_concurrency = new - self.min_concurrency += 1 - self._grow(n) - - def force_scale_down(self, n): - with self.mutex: - new = self.processes - n - if new < self.min_concurrency: - self.min_concurrency = max(new, 0) - self._shrink(min(n, self.processes)) - - def scale_up(self, n): - self._last_action = monotonic() - return self._grow(n) - - def scale_down(self, n): - if n and self._last_action and ( - monotonic() - self._last_action > self.keepalive): - self._last_action = monotonic() - return self._shrink(n) - - def _grow(self, n): - info('Scaling up %s processes.', n) - self.pool.grow(n) - self.worker.consumer._update_prefetch_count(n) - - def _shrink(self, n): - info('Scaling down %s processes.', n) - try: - self.pool.shrink(n) - except ValueError: - debug("Autoscaler won't scale down: all processes busy.") - except Exception as exc: - error('Autoscaler: scale_down: %r', exc, exc_info=True) - self.worker.consumer._update_prefetch_count(-n) - - def info(self): - return {'max': self.max_concurrency, - 'min': self.min_concurrency, - 'current': self.processes, - 'qty': self.qty} - - @property - def qty(self): - return len(state.reserved_requests) - - @property - def processes(self): - return self.pool.num_processes diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/worker/components.py b/thesisenv/lib/python3.6/site-packages/celery/worker/components.py deleted file mode 100644 index bb02f4e..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/components.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.components - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Default worker bootsteps. - -""" -from __future__ import absolute_import - -import atexit -import warnings - -from kombu.async import Hub as _Hub, get_event_loop, set_event_loop -from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore -from kombu.async.timer import Timer as _Timer - -from celery import bootsteps -from celery._state import _set_task_join_will_block -from celery.exceptions import ImproperlyConfigured -from celery.five import string_t -from celery.utils.log import worker_logger as logger - -__all__ = ['Timer', 'Hub', 'Queues', 'Pool', 'Beat', 'StateDB', 'Consumer'] - -ERR_B_GREEN = """\ --B option doesn't work with eventlet/gevent pools: \ -use standalone beat instead.\ -""" - -W_POOL_SETTING = """ -The CELERYD_POOL setting should not be used to select the eventlet/gevent -pools, instead you *must use the -P* argument so that patches are applied -as early as possible. -""" - - -class Timer(bootsteps.Step): - """This step initializes the internal timer used by the worker.""" - - def create(self, w): - if w.use_eventloop: - # does not use dedicated timer thread. - w.timer = _Timer(max_interval=10.0) - else: - if not w.timer_cls: - # Default Timer is set by the pool, as e.g. eventlet - # needs a custom implementation. - w.timer_cls = w.pool_cls.Timer - w.timer = self.instantiate(w.timer_cls, - max_interval=w.timer_precision, - on_timer_error=self.on_timer_error, - on_timer_tick=self.on_timer_tick) - - def on_timer_error(self, exc): - logger.error('Timer error: %r', exc, exc_info=True) - - def on_timer_tick(self, delay): - logger.debug('Timer wake-up! Next eta %s secs.', delay) - - -class Hub(bootsteps.StartStopStep): - requires = (Timer, ) - - def __init__(self, w, **kwargs): - w.hub = None - - def include_if(self, w): - return w.use_eventloop - - def create(self, w): - w.hub = get_event_loop() - if w.hub is None: - w.hub = set_event_loop(_Hub(w.timer)) - self._patch_thread_primitives(w) - return self - - def start(self, w): - pass - - def stop(self, w): - w.hub.close() - - def terminate(self, w): - w.hub.close() - - def _patch_thread_primitives(self, w): - # make clock use dummy lock - w.app.clock.mutex = DummyLock() - # multiprocessing's ApplyResult uses this lock. - try: - from billiard import pool - except ImportError: - pass - else: - pool.Lock = DummyLock - - -class Queues(bootsteps.Step): - """This bootstep initializes the internal queues - used by the worker.""" - label = 'Queues (intra)' - requires = (Hub, ) - - def create(self, w): - w.process_task = w._process_task - if w.use_eventloop: - if w.pool_putlocks and w.pool_cls.uses_semaphore: - w.process_task = w._process_task_sem - - -class Pool(bootsteps.StartStopStep): - """Bootstep managing the worker pool. - - Describes how to initialize the worker pool, and starts and stops - the pool during worker startup/shutdown. 
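- - For illustration (not from the original docstring): the - command-line option ``--autoscale=10,3`` arrives here as the - string ``'10,3'``, which ``__init__`` below parses into - ``max_concurrency=10`` and ``min_concurrency=3``.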
- - Adds attributes: - - * autoscale - * pool - * max_concurrency - * min_concurrency - - """ - requires = (Queues, ) - - def __init__(self, w, autoscale=None, autoreload=None, - no_execv=False, optimization=None, **kwargs): - if isinstance(autoscale, string_t): - max_c, _, min_c = autoscale.partition(',') - autoscale = [int(max_c), min_c and int(min_c) or 0] - w.autoscale = autoscale - w.pool = None - w.max_concurrency = None - w.min_concurrency = w.concurrency - w.no_execv = no_execv - if w.autoscale: - w.max_concurrency, w.min_concurrency = w.autoscale - self.autoreload_enabled = autoreload - self.optimization = optimization - - def close(self, w): - if w.pool: - w.pool.close() - - def terminate(self, w): - if w.pool: - w.pool.terminate() - - def create(self, w, semaphore=None, max_restarts=None): - if w.app.conf.CELERYD_POOL in ('eventlet', 'gevent'): - warnings.warn(UserWarning(W_POOL_SETTING)) - threaded = not w.use_eventloop - procs = w.min_concurrency - forking_enable = w.no_execv if w.force_execv else True - if not threaded: - semaphore = w.semaphore = LaxBoundedSemaphore(procs) - w._quick_acquire = w.semaphore.acquire - w._quick_release = w.semaphore.release - max_restarts = 100 - allow_restart = self.autoreload_enabled or w.pool_restarts - pool = w.pool = self.instantiate( - w.pool_cls, w.min_concurrency, - initargs=(w.app, w.hostname), - maxtasksperchild=w.max_tasks_per_child, - timeout=w.task_time_limit, - soft_timeout=w.task_soft_time_limit, - putlocks=w.pool_putlocks and threaded, - lost_worker_timeout=w.worker_lost_wait, - threads=threaded, - max_restarts=max_restarts, - allow_restart=allow_restart, - forking_enable=forking_enable, - semaphore=semaphore, - sched_strategy=self.optimization, - ) - _set_task_join_will_block(pool.task_join_will_block) - return pool - - def info(self, w): - return {'pool': w.pool.info if w.pool else 'N/A'} - - def register_with_event_loop(self, w, hub): - w.pool.register_with_event_loop(hub) - - -class Beat(bootsteps.StartStopStep): - """Step used to embed a beat process. - - This will only be enabled if the ``beat`` - argument is set. 
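- - (Illustrative note, not from the original docstring: this is the - embedded beat service behind the worker's ``-B`` option; as the - ``ERR_B_GREEN`` message above says, it cannot be used with the - eventlet/gevent pools.)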
- - """ - label = 'Beat' - conditional = True - - def __init__(self, w, beat=False, **kwargs): - self.enabled = w.beat = beat - w.beat = None - - def create(self, w): - from celery.beat import EmbeddedService - if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): - raise ImproperlyConfigured(ERR_B_GREEN) - b = w.beat = EmbeddedService(w.app, - schedule_filename=w.schedule_filename, - scheduler_cls=w.scheduler_cls) - return b - - -class StateDB(bootsteps.Step): - """This bootstep sets up the workers state db if enabled.""" - - def __init__(self, w, **kwargs): - self.enabled = w.state_db - w._persistence = None - - def create(self, w): - w._persistence = w.state.Persistent(w.state, w.state_db, w.app.clock) - atexit.register(w._persistence.save) - - -class Consumer(bootsteps.StartStopStep): - last = True - - def create(self, w): - if w.max_concurrency: - prefetch_count = max(w.min_concurrency, 1) * w.prefetch_multiplier - else: - prefetch_count = w.concurrency * w.prefetch_multiplier - c = w.consumer = self.instantiate( - w.consumer_cls, w.process_task, - hostname=w.hostname, - send_events=w.send_events, - init_callback=w.ready_callback, - initial_prefetch_count=prefetch_count, - pool=w.pool, - timer=w.timer, - app=w.app, - controller=w, - hub=w.hub, - worker_options=w.options, - disable_rate_limits=w.disable_rate_limits, - prefetch_multiplier=w.prefetch_multiplier, - ) - return c diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py b/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py deleted file mode 100644 index cc93d6c..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py +++ /dev/null @@ -1,887 +0,0 @@ -# -*- coding: utf-8 -*- -""" -celery.worker.consumer -~~~~~~~~~~~~~~~~~~~~~~ - -This module contains the components responsible for consuming messages -from the broker, processing the messages and keeping the broker connections -up and running. - -""" -from __future__ import absolute_import - -import errno -import kombu -import logging -import os -import socket - -from collections import defaultdict -from functools import partial -from heapq import heappush -from operator import itemgetter -from time import sleep - -from billiard.common import restart_state -from billiard.exceptions import RestartFreqExceeded -from kombu.async.semaphore import DummyLock -from kombu.common import QoS, ignore_errors -from kombu.syn import _detect_environment -from kombu.utils.compat import get_errno -from kombu.utils.encoding import safe_repr, bytes_t -from kombu.utils.limits import TokenBucket - -from celery import chain -from celery import bootsteps -from celery.app.trace import build_tracer -from celery.canvas import signature -from celery.exceptions import InvalidTaskError -from celery.five import items, values -from celery.utils.functional import noop -from celery.utils.log import get_logger -from celery.utils.objects import Bunch -from celery.utils.text import truncate -from celery.utils.timeutils import humanize_seconds, rate - -from . import heartbeat, loops, pidbox -from .state import task_reserved, maybe_shutdown, revoked, reserved_requests - -try: - buffer_t = buffer -except NameError: # pragma: no cover - # Py3 does not have buffer, but we only need isinstance. 
- - class buffer_t(object): # noqa - pass - -__all__ = [ - 'Consumer', 'Connection', 'Events', 'Heart', 'Control', - 'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body', -] - -CLOSE = bootsteps.CLOSE -logger = get_logger(__name__) -debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, - logger.error, logger.critical) - -CONNECTION_RETRY = """\ -consumer: Connection to broker lost. \ -Trying to re-establish the connection...\ -""" - -CONNECTION_RETRY_STEP = """\ -Trying again {when}...\ -""" - -CONNECTION_ERROR = """\ -consumer: Cannot connect to %s: %s. -%s -""" - -CONNECTION_FAILOVER = """\ -Will retry using next failover.\ -""" - -UNKNOWN_FORMAT = """\ -Received and deleted unknown message. Wrong destination?!? - -The full contents of the message body was: %s -""" - -#: Error message for when an unregistered task is received. -UNKNOWN_TASK_ERROR = """\ -Received unregistered task of type %s. -The message has been ignored and discarded. - -Did you remember to import the module containing this task? -Or maybe you are using relative imports? -Please see http://bit.ly/gLye1c for more information. - -The full contents of the message body was: -%s -""" - -#: Error message for when an invalid task message is received. -INVALID_TASK_ERROR = """\ -Received invalid task message: %s -The message has been ignored and discarded. - -Please ensure your message conforms to the task -message protocol as described here: http://bit.ly/hYj41y - -The full contents of the message body was: -%s -""" - -MESSAGE_DECODE_ERROR = """\ -Can't decode message body: %r [type:%r encoding:%r headers:%s] - -body: %s -""" - -MESSAGE_REPORT = """\ -body: {0} -{{content_type:{1} content_encoding:{2} - delivery_info:{3} headers={4}}} -""" - -MINGLE_GET_FIELDS = itemgetter('clock', 'revoked') - - -def dump_body(m, body): - if isinstance(body, buffer_t): - body = bytes_t(body) - return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), - len(m.body)) - - -class Consumer(object): - Strategies = dict - - #: set when consumer is shutting down. - in_shutdown = False - - #: Optional callback called the first time the worker - #: is ready to receive tasks. - init_callback = None - - #: The current worker pool instance. - pool = None - - #: A timer used for high-priority internal tasks, such - #: as sending heartbeats. 
- timer = None - - restart_count = -1 # first start is the same as a restart - - class Blueprint(bootsteps.Blueprint): - name = 'Consumer' - default_steps = [ - 'celery.worker.consumer:Connection', - 'celery.worker.consumer:Mingle', - 'celery.worker.consumer:Events', - 'celery.worker.consumer:Gossip', - 'celery.worker.consumer:Heart', - 'celery.worker.consumer:Control', - 'celery.worker.consumer:Tasks', - 'celery.worker.consumer:Evloop', - 'celery.worker.consumer:Agent', - ] - - def shutdown(self, parent): - self.send_all(parent, 'shutdown') - - def __init__(self, on_task_request, - init_callback=noop, hostname=None, - pool=None, app=None, - timer=None, controller=None, hub=None, amqheartbeat=None, - worker_options=None, disable_rate_limits=False, - initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): - self.app = app - self.controller = controller - self.init_callback = init_callback - self.hostname = hostname or socket.gethostname() - self.pid = os.getpid() - self.pool = pool - self.timer = timer - self.strategies = self.Strategies() - conninfo = self.app.connection() - self.connection_errors = conninfo.connection_errors - self.channel_errors = conninfo.channel_errors - self._restart_state = restart_state(maxR=5, maxT=1) - - self._does_info = logger.isEnabledFor(logging.INFO) - self.on_task_request = on_task_request - self.on_task_message = set() - self.amqheartbeat_rate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE - self.disable_rate_limits = disable_rate_limits - self.initial_prefetch_count = initial_prefetch_count - self.prefetch_multiplier = prefetch_multiplier - - # this contains a tokenbucket for each task type by name, used for - # rate limits, or None if rate limits are disabled for that task. - self.task_buckets = defaultdict(lambda: None) - self.reset_rate_limits() - - self.hub = hub - if self.hub: - self.amqheartbeat = amqheartbeat - if self.amqheartbeat is None: - self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT - else: - self.amqheartbeat = 0 - - if not hasattr(self, 'loop'): - self.loop = loops.asynloop if hub else loops.synloop - - if _detect_environment() == 'gevent': - # there's a gevent bug that causes timeouts to not be reset, - # so if the connection timeout is exceeded once, it can NEVER - # connect again. - self.app.conf.BROKER_CONNECTION_TIMEOUT = None - - self.steps = [] - self.blueprint = self.Blueprint( - app=self.app, on_close=self.on_close, - ) - self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) - - def bucket_for_task(self, type): - limit = rate(getattr(type, 'rate_limit', None)) - return TokenBucket(limit, capacity=1) if limit else None - - def reset_rate_limits(self): - self.task_buckets.update( - (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks) - ) - - def _update_prefetch_count(self, index=0): - """Update prefetch count after pool grow/shrink operations. - - Index must be the change in number of processes as a positive - (increasing) or negative (decreasing) number. - - .. note:: - - Currently pool grow operations will end up with an offset - of +1 if the initial size of the pool was 0 (e.g. - ``--autoscale=1,0``).
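- - A worked example (illustrative, not from the original - docstring): with ``prefetch_multiplier = 4`` and a pool that - currently runs 3 processes, the code below resets the prefetch - count to ``3 * 4 = 12`` before nudging the QoS by - ``index * prefetch_multiplier``.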
- - """ - num_processes = self.pool.num_processes - if not self.initial_prefetch_count or not num_processes: - return # prefetch disabled - self.initial_prefetch_count = ( - self.pool.num_processes * self.prefetch_multiplier - ) - return self._update_qos_eventually(index) - - def _update_qos_eventually(self, index): - return (self.qos.decrement_eventually if index < 0 - else self.qos.increment_eventually)( - abs(index) * self.prefetch_multiplier) - - def _limit_task(self, request, bucket, tokens): - if not bucket.can_consume(tokens): - hold = bucket.expected_time(tokens) - self.timer.call_after( - hold, self._limit_task, (request, bucket, tokens), - ) - else: - task_reserved(request) - self.on_task_request(request) - - def start(self): - blueprint = self.blueprint - while blueprint.state != CLOSE: - self.restart_count += 1 - maybe_shutdown() - try: - blueprint.start(self) - except self.connection_errors as exc: - if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE: - raise # Too many open files - maybe_shutdown() - try: - self._restart_state.step() - except RestartFreqExceeded as exc: - crit('Frequent restarts detected: %r', exc, exc_info=1) - sleep(1) - if blueprint.state != CLOSE and self.connection: - warn(CONNECTION_RETRY, exc_info=True) - try: - self.connection.collect() - except Exception: - pass - self.on_close() - blueprint.restart(self) - - def register_with_event_loop(self, hub): - self.blueprint.send_all( - self, 'register_with_event_loop', args=(hub, ), - description='Hub.register', - ) - - def shutdown(self): - self.in_shutdown = True - self.blueprint.shutdown(self) - - def stop(self): - self.blueprint.stop(self) - - def on_ready(self): - callback, self.init_callback = self.init_callback, None - if callback: - callback(self) - - def loop_args(self): - return (self, self.connection, self.task_consumer, - self.blueprint, self.hub, self.qos, self.amqheartbeat, - self.app.clock, self.amqheartbeat_rate) - - def on_decode_error(self, message, exc): - """Callback called if an error occurs while decoding - a message received. - - Simply logs the error and acknowledges the message so it - doesn't enter a loop. - - :param message: The message with errors. - :param exc: The original exception instance. - - """ - crit(MESSAGE_DECODE_ERROR, - exc, message.content_type, message.content_encoding, - safe_repr(message.headers), dump_body(message, message.body), - exc_info=1) - message.ack() - - def on_close(self): - # Clear internal queues to get rid of old messages. - # They can't be acked anyway, as a delivery tag is specific - # to the current channel. - if self.controller and self.controller.semaphore: - self.controller.semaphore.clear() - if self.timer: - self.timer.clear() - reserved_requests.clear() - if self.pool and self.pool.flush: - self.pool.flush() - - def connect(self): - """Establish the broker connection. - - Will retry establishing the connection if the - :setting:`BROKER_CONNECTION_RETRY` setting is enabled - - """ - conn = self.app.connection(heartbeat=self.amqheartbeat) - - # Callback called for each retry while the connection - # can't be established. - def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): - if getattr(conn, 'alt', None) and interval == 0: - next_step = CONNECTION_FAILOVER - error(CONNECTION_ERROR, conn.as_uri(), exc, - next_step.format(when=humanize_seconds(interval, 'in', ' '))) - - # remember that the connection is lazy, it won't establish - # until needed. 
- if not self.app.conf.BROKER_CONNECTION_RETRY: - # retry disabled, just call connect directly. - conn.connect() - return conn - - conn = conn.ensure_connection( - _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES, - callback=maybe_shutdown, - ) - if self.hub: - conn.transport.register_with_event_loop(conn.connection, self.hub) - return conn - - def add_task_queue(self, queue, exchange=None, exchange_type=None, - routing_key=None, **options): - cset = self.task_consumer - queues = self.app.amqp.queues - # Must use `in` here, as __missing__ will automatically - # create queues when CELERY_CREATE_MISSING_QUEUES is enabled. - # (Issue #1079) - if queue in queues: - q = queues[queue] - else: - exchange = queue if exchange is None else exchange - exchange_type = ('direct' if exchange_type is None - else exchange_type) - q = queues.select_add(queue, - exchange=exchange, - exchange_type=exchange_type, - routing_key=routing_key, **options) - if not cset.consuming_from(queue): - cset.add_queue(q) - cset.consume() - info('Started consuming from %s', queue) - - def cancel_task_queue(self, queue): - info('Canceling queue %s', queue) - self.app.amqp.queues.deselect(queue) - self.task_consumer.cancel_by_queue(queue) - - def apply_eta_task(self, task): - """Method called by the timer to apply a task with an - ETA/countdown.""" - task_reserved(task) - self.on_task_request(task) - self.qos.decrement_eventually() - - def _message_report(self, body, message): - return MESSAGE_REPORT.format(dump_body(message, body), - safe_repr(message.content_type), - safe_repr(message.content_encoding), - safe_repr(message.delivery_info), - safe_repr(message.headers)) - - def on_unknown_message(self, body, message): - warn(UNKNOWN_FORMAT, self._message_report(body, message)) - message.reject_log_error(logger, self.connection_errors) - - def on_unknown_task(self, body, message, exc): - error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def on_invalid_task(self, body, message, exc): - error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def update_strategies(self): - loader = self.app.loader - for name, task in items(self.app.tasks): - self.strategies[name] = task.start_strategy(self.app, self) - task.__trace__ = build_tracer(name, task, loader, self.hostname, - app=self.app) - - def create_task_handler(self): - strategies = self.strategies - on_unknown_message = self.on_unknown_message - on_unknown_task = self.on_unknown_task - on_invalid_task = self.on_invalid_task - callbacks = self.on_task_message - - def on_task_received(body, message): - headers = message.headers - try: - type_, is_proto2 = body['task'], 0 - except (KeyError, TypeError): - try: - type_, is_proto2 = headers['task'], 1 - except (KeyError, TypeError): - return on_unknown_message(body, message) - - if is_proto2: - body = proto2_to_proto1( - self.app, type_, body, message, headers) - - try: - strategies[type_](message, body, - message.ack_log_error, - message.reject_log_error, - callbacks) - except KeyError as exc: - on_unknown_task(body, message, exc) - except InvalidTaskError as exc: - on_invalid_task(body, message, exc) - - return on_task_received - - def __repr__(self): - return '<Consumer: {self.hostname} ({state})>'.format( - self=self, state=self.blueprint.human_state(), - ) - - -def proto2_to_proto1(app, type_, body, message, headers): - args, kwargs, embed = body - embedded = _extract_proto2_embed(**embed) -
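# (annotation, not in the original source: for a hypothetical - # protocol-2 body ((2, 3), {}, {'callbacks': None, 'errbacks': None, - # 'chain': None, 'chord': None}), the headers and the embedded - # fields are flattened below into one protocol-1 dict with keys - # such as 'task', 'id', 'args', 'kwargs' and 'callbacks'.) -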
chained = embedded.pop('chain') - new_body = dict( - _extract_proto2_headers(type_, **headers), - args=args, - kwargs=kwargs, - **embedded) - if chained: - new_body['callbacks'].append(chain(chained, app=app)) - return new_body - - -def _extract_proto2_headers(type_, id, retries, eta, expires, - group, timelimit, **_): - return { - 'id': id, - 'task': type_, - 'retries': retries, - 'eta': eta, - 'expires': expires, - 'utc': True, - 'taskset': group, - 'timelimit': timelimit, - } - - -def _extract_proto2_embed(callbacks, errbacks, chain, chord, **_): - return { - 'callbacks': callbacks or [], - 'errbacks': errbacks, - 'chain': chain, - 'chord': chord, - } - - -class Connection(bootsteps.StartStopStep): - - def __init__(self, c, **kwargs): - c.connection = None - - def start(self, c): - c.connection = c.connect() - info('Connected to %s', c.connection.as_uri()) - - def shutdown(self, c): - # We must set self.connection to None here, so - # that the green pidbox thread exits. - connection, c.connection = c.connection, None - if connection: - ignore_errors(connection, connection.close) - - def info(self, c, params='N/A'): - if c.connection: - params = c.connection.info() - params.pop('password', None) # don't send password. - return {'broker': params} - - -class Events(bootsteps.StartStopStep): - requires = (Connection, ) - - def __init__(self, c, send_events=None, **kwargs): - self.send_events = True - self.groups = None if send_events else ['worker'] - c.event_dispatcher = None - - def start(self, c): - # flush events sent while connection was down. - prev = self._close(c) - dis = c.event_dispatcher = c.app.events.Dispatcher( - c.connect(), hostname=c.hostname, - enabled=self.send_events, groups=self.groups, - ) - if prev: - dis.extend_buffer(prev) - dis.flush() - - def stop(self, c): - pass - - def _close(self, c): - if c.event_dispatcher: - dispatcher = c.event_dispatcher - # remember changes from remote control commands: - self.groups = dispatcher.groups - - # close custom connection - if dispatcher.connection: - ignore_errors(c, dispatcher.connection.close) - ignore_errors(c, dispatcher.close) - c.event_dispatcher = None - return dispatcher - - def shutdown(self, c): - self._close(c) - - -class Heart(bootsteps.StartStopStep): - requires = (Events, ) - - def __init__(self, c, without_heartbeat=False, heartbeat_interval=None, - **kwargs): - self.enabled = not without_heartbeat - self.heartbeat_interval = heartbeat_interval - c.heart = None - - def start(self, c): - c.heart = heartbeat.Heart( - c.timer, c.event_dispatcher, self.heartbeat_interval, - ) - c.heart.start() - - def stop(self, c): - c.heart = c.heart and c.heart.stop() - shutdown = stop - - -class Mingle(bootsteps.StartStopStep): - label = 'Mingle' - requires = (Events, ) - compatible_transports = set(['amqp', 'redis']) - - def __init__(self, c, without_mingle=False, **kwargs): - self.enabled = not without_mingle and self.compatible_transport(c.app) - - def compatible_transport(self, app): - with app.connection() as conn: - return conn.transport.driver_type in self.compatible_transports - - def start(self, c): - info('mingle: searching for neighbors') - I = c.app.control.inspect(timeout=1.0, connection=c.connection) - replies = I.hello(c.hostname, revoked._data) or {} - replies.pop(c.hostname, None) - if replies: - info('mingle: sync with %s nodes', - len([reply for reply, value in items(replies) if value])) - for reply in values(replies): - if reply: - try: - other_clock, other_revoked = MINGLE_GET_FIELDS(reply) - except 
KeyError: # reply from pre-3.1 worker - pass - else: - c.app.clock.adjust(other_clock) - revoked.update(other_revoked) - info('mingle: sync complete') - else: - info('mingle: all alone') - - -class Tasks(bootsteps.StartStopStep): - requires = (Mingle, ) - - def __init__(self, c, **kwargs): - c.task_consumer = c.qos = None - - def start(self, c): - c.update_strategies() - - # - RabbitMQ 3.3 completely redefines how basic_qos works.. - # This will detect if the new qos smenatics is in effect, - # and if so make sure the 'apply_global' flag is set on qos updates. - qos_global = not c.connection.qos_semantics_matches_spec - - # set initial prefetch count - c.connection.default_channel.basic_qos( - 0, c.initial_prefetch_count, qos_global, - ) - - c.task_consumer = c.app.amqp.TaskConsumer( - c.connection, on_decode_error=c.on_decode_error, - ) - - def set_prefetch_count(prefetch_count): - return c.task_consumer.qos( - prefetch_count=prefetch_count, - apply_global=qos_global, - ) - c.qos = QoS(set_prefetch_count, c.initial_prefetch_count) - - def stop(self, c): - if c.task_consumer: - debug('Canceling task consumer...') - ignore_errors(c, c.task_consumer.cancel) - - def shutdown(self, c): - if c.task_consumer: - self.stop(c) - debug('Closing consumer channel...') - ignore_errors(c, c.task_consumer.close) - c.task_consumer = None - - def info(self, c): - return {'prefetch_count': c.qos.value if c.qos else 'N/A'} - - -class Agent(bootsteps.StartStopStep): - conditional = True - requires = (Connection, ) - - def __init__(self, c, **kwargs): - self.agent_cls = self.enabled = c.app.conf.CELERYD_AGENT - - def create(self, c): - agent = c.agent = self.instantiate(self.agent_cls, c.connection) - return agent - - -class Control(bootsteps.StartStopStep): - requires = (Tasks, ) - - def __init__(self, c, **kwargs): - self.is_green = c.pool is not None and c.pool.is_green - self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) - self.start = self.box.start - self.stop = self.box.stop - self.shutdown = self.box.shutdown - - def include_if(self, c): - return c.app.conf.CELERY_ENABLE_REMOTE_CONTROL - - -class Gossip(bootsteps.ConsumerStep): - label = 'Gossip' - requires = (Mingle, ) - _cons_stamp_fields = itemgetter( - 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', - ) - compatible_transports = set(['amqp', 'redis']) - - def __init__(self, c, without_gossip=False, interval=5.0, **kwargs): - self.enabled = not without_gossip and self.compatible_transport(c.app) - self.app = c.app - c.gossip = self - self.Receiver = c.app.events.Receiver - self.hostname = c.hostname - self.full_hostname = '.'.join([self.hostname, str(c.pid)]) - self.on = Bunch( - node_join=set(), - node_leave=set(), - node_lost=set(), - ) - - self.timer = c.timer - if self.enabled: - self.state = c.app.events.State( - on_node_join=self.on_node_join, - on_node_leave=self.on_node_leave, - max_tasks_in_memory=1, - ) - if c.hub: - c._mutex = DummyLock() - self.update_state = self.state.event - self.interval = interval - self._tref = None - self.consensus_requests = defaultdict(list) - self.consensus_replies = {} - self.event_handlers = { - 'worker.elect': self.on_elect, - 'worker.elect.ack': self.on_elect_ack, - } - self.clock = c.app.clock - - self.election_handlers = { - 'task': self.call_task - } - - def compatible_transport(self, app): - with app.connection() as conn: - return conn.transport.driver_type in self.compatible_transports - - def election(self, id, topic, action=None): - self.consensus_replies[id] = [] - 
self.dispatcher.send( - 'worker-elect', - id=id, topic=topic, action=action, cver=1, - ) - - def call_task(self, task): - try: - signature(task, app=self.app).apply_async() - except Exception as exc: - error('Could not call task: %r', exc, exc_info=1) - - def on_elect(self, event): - try: - (id_, clock, hostname, pid, - topic, action, _) = self._cons_stamp_fields(event) - except KeyError as exc: - return error('election request missing field %s', exc, exc_info=1) - heappush( - self.consensus_requests[id_], - (clock, '%s.%s' % (hostname, pid), topic, action), - ) - self.dispatcher.send('worker-elect-ack', id=id_) - - def start(self, c): - super(Gossip, self).start(c) - self.dispatcher = c.event_dispatcher - - def on_elect_ack(self, event): - id = event['id'] - try: - replies = self.consensus_replies[id] - except KeyError: - return # not for us - alive_workers = self.state.alive_workers() - replies.append(event['hostname']) - - if len(replies) >= len(alive_workers): - _, leader, topic, action = self.clock.sort_heap( - self.consensus_requests[id], - ) - if leader == self.full_hostname: - info('I won the election %r', id) - try: - handler = self.election_handlers[topic] - except KeyError: - error('Unknown election topic %r', topic, exc_info=1) - else: - handler(action) - else: - info('node %s elected for %r', leader, id) - self.consensus_requests.pop(id, None) - self.consensus_replies.pop(id, None) - - def on_node_join(self, worker): - debug('%s joined the party', worker.hostname) - self._call_handlers(self.on.node_join, worker) - - def on_node_leave(self, worker): - debug('%s left', worker.hostname) - self._call_handlers(self.on.node_leave, worker) - - def on_node_lost(self, worker): - info('missed heartbeat from %s', worker.hostname) - self._call_handlers(self.on.node_lost, worker) - - def _call_handlers(self, handlers, *args, **kwargs): - for handler in handlers: - try: - handler(*args, **kwargs) - except Exception as exc: - error('Ignored error from handler %r: %r', - handler, exc, exc_info=1) - - def register_timer(self): - if self._tref is not None: - self._tref.cancel() - self._tref = self.timer.call_repeatedly(self.interval, self.periodic) - - def periodic(self): - workers = self.state.workers - dirty = set() - for worker in values(workers): - if not worker.alive: - dirty.add(worker) - self.on_node_lost(worker) - for worker in dirty: - workers.pop(worker.hostname, None) - - def get_consumers(self, channel): - self.register_timer() - ev = self.Receiver(channel, routing_key='worker.#') - return [kombu.Consumer( - channel, - queues=[ev.queue], - on_message=partial(self.on_message, ev.event_from_message), - no_ack=True - )] - - def on_message(self, prepare, message): - _type = message.delivery_info['routing_key'] - - # For redis when `fanout_patterns=False` (See Issue #1882) - if _type.split('.', 1)[0] == 'task': - return - try: - handler = self.event_handlers[_type] - except KeyError: - pass - else: - return handler(message.payload) - - hostname = (message.headers.get('hostname') or - message.payload['hostname']) - if hostname != self.hostname: - type, event = prepare(message.payload) - self.update_state(event) - else: - self.clock.forward() - - -class Evloop(bootsteps.StartStopStep): - label = 'event loop' - last = True - - def start(self, c): - self.patch_all(c) - c.loop(*c.loop_args()) - - def patch_all(self, c): - c.qos._mutex = DummyLock() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/control.py b/thesisenv/lib/python3.6/site-packages/celery/worker/control.py 
deleted file mode 100644 index e8b033d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/control.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.control - ~~~~~~~~~~~~~~~~~~~~~ - - Remote control commands. - -""" -from __future__ import absolute_import - -import io -import tempfile - -from kombu.utils.encoding import safe_repr - -from celery.exceptions import WorkerShutdown -from celery.five import UserDict, items, string_t -from celery.platforms import signals as _signals -from celery.utils import timeutils -from celery.utils.functional import maybe_list -from celery.utils.log import get_logger -from celery.utils import jsonify - -from . import state as worker_state -from .state import revoked -from .job import Request - -__all__ = ['Panel'] -DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') -logger = get_logger(__name__) - - -class Panel(UserDict): - data = dict() # Global registry. - - @classmethod - def register(cls, method, name=None): - cls.data[name or method.__name__] = method - return method - - -def _find_requests_by_id(ids, requests): - found, total = 0, len(ids) - for request in requests: - if request.id in ids: - yield request - found += 1 - if found >= total: - break - - -@Panel.register -def query_task(state, ids, **kwargs): - ids = maybe_list(ids) - - def reqinfo(state, req): - return state, req.info() - - reqs = dict((req.id, ('reserved', req.info())) - for req in _find_requests_by_id( - ids, worker_state.reserved_requests)) - reqs.update(dict( - (req.id, ('active', req.info())) - for req in _find_requests_by_id( - ids, worker_state.active_requests, - ) - )) - - return reqs - - -@Panel.register -def revoke(state, task_id, terminate=False, signal=None, **kwargs): - """Revoke task by task id.""" - # supports list argument since 3.1 - task_ids, task_id = set(maybe_list(task_id) or []), None - size = len(task_ids) - terminated = set() - - revoked.update(task_ids) - if terminate: - signum = _signals.signum(signal or 'TERM') - # reserved_requests changes size during iteration - # so need to consume the items first, then terminate after. 
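The comment above captures the subtlety: request.terminate() ends up discarding the request from reserved_requests, so the generator has to be drained into a set() before the loop starts. A self-contained illustration of why, with plain sets instead of celery request objects:

reserved = {'id1', 'id2', 'id3'}

# Mutating a set while iterating it directly raises
# "RuntimeError: Set changed size during iteration" in CPython:
try:
    for rid in reserved:
        reserved.discard(rid)
except RuntimeError:
    pass

# Snapshot first, then mutate -- the pattern used below:
for rid in set(reserved):
    reserved.discard(rid)
assert not reserved
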
- requests = set(_find_requests_by_id( - task_ids, - worker_state.reserved_requests, - )) - for request in requests: - if request.id not in terminated: - terminated.add(request.id) - logger.info('Terminating %s (%s)', request.id, signum) - request.terminate(state.consumer.pool, signal=signum) - if len(terminated) >= size: - break - - if not terminated: - return {'ok': 'terminate: tasks unknown'} - return {'ok': 'terminate: {0}'.format(', '.join(terminated))} - - idstr = ', '.join(task_ids) - logger.info('Tasks flagged as revoked: %s', idstr) - return {'ok': 'tasks {0} flagged as revoked'.format(idstr)} - - -@Panel.register -def report(state): - return {'ok': state.app.bugreport()} - - -@Panel.register -def enable_events(state): - dispatcher = state.consumer.event_dispatcher - if dispatcher.groups and 'task' not in dispatcher.groups: - dispatcher.groups.add('task') - logger.info('Events of group {task} enabled by remote.') - return {'ok': 'task events enabled'} - return {'ok': 'task events already enabled'} - - -@Panel.register -def disable_events(state): - dispatcher = state.consumer.event_dispatcher - if 'task' in dispatcher.groups: - dispatcher.groups.discard('task') - logger.info('Events of group {task} disabled by remote.') - return {'ok': 'task events disabled'} - return {'ok': 'task events already disabled'} - - -@Panel.register -def heartbeat(state): - logger.debug('Heartbeat requested by remote.') - dispatcher = state.consumer.event_dispatcher - dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) - - -@Panel.register -def rate_limit(state, task_name, rate_limit, **kwargs): - """Set new rate limit for a task type. - - See :attr:`celery.task.base.Task.rate_limit`. - - :param task_name: Type of task. - :param rate_limit: New rate limit. 
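timeutils.rate() in the try block below converts strings such as '100/m' into a tasks-per-second figure. A hedged, illustrative parser for the same notation (this is a sketch of the idea, not celery's implementation):

def parse_rate(limit):
    # '100/m' means 100 tasks per 60 seconds; bare numbers are
    # interpreted as tasks per second; falsy values disable the limit.
    if not limit:
        return 0.0
    if isinstance(limit, str):
        count, _, unit = limit.partition('/')
        period = {'s': 1, 'm': 60, 'h': 3600}[unit or 's']
        return float(count) / period
    return float(limit)

assert parse_rate('120/m') == 2.0
assert parse_rate(None) == 0.0
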
- - """ - - try: - timeutils.rate(rate_limit) - except ValueError as exc: - return {'error': 'Invalid rate limit string: {0!r}'.format(exc)} - - try: - state.app.tasks[task_name].rate_limit = rate_limit - except KeyError: - logger.error('Rate limit attempt for unknown task %s', - task_name, exc_info=True) - return {'error': 'unknown task'} - - state.consumer.reset_rate_limits() - - if not rate_limit: - logger.info('Rate limits disabled for tasks of type %s', task_name) - return {'ok': 'rate limit disabled successfully'} - - logger.info('New rate limit for tasks of type %s: %s.', - task_name, rate_limit) - return {'ok': 'new rate limit set successfully'} - - -@Panel.register -def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): - try: - task = state.app.tasks[task_name] - except KeyError: - logger.error('Change time limit attempt for unknown task %s', - task_name, exc_info=True) - return {'error': 'unknown task'} - - task.soft_time_limit = soft - task.time_limit = hard - - logger.info('New time limits for tasks of type %s: soft=%s hard=%s', - task_name, soft, hard) - return {'ok': 'time limits set successfully'} - - -@Panel.register -def dump_schedule(state, safe=False, **kwargs): - - def prepare_entries(): - for waiting in state.consumer.timer.schedule.queue: - try: - arg0 = waiting.entry.args[0] - except (IndexError, TypeError): - continue - else: - if isinstance(arg0, Request): - yield {'eta': arg0.eta.isoformat() if arg0.eta else None, - 'priority': waiting.priority, - 'request': arg0.info(safe=safe)} - return list(prepare_entries()) - - -@Panel.register -def dump_reserved(state, safe=False, **kwargs): - reserved = worker_state.reserved_requests - worker_state.active_requests - if not reserved: - return [] - return [request.info(safe=safe) for request in reserved] - - -@Panel.register -def dump_active(state, safe=False, **kwargs): - return [request.info(safe=safe) - for request in worker_state.active_requests] - - -@Panel.register -def stats(state, **kwargs): - return state.consumer.controller.stats() - - -@Panel.register -def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover - try: - import objgraph - except ImportError: - raise ImportError('Requires the objgraph library') - print('Dumping graph for type %r' % (type, )) - with tempfile.NamedTemporaryFile(prefix='cobjg', - suffix='.png', delete=False) as fh: - objects = objgraph.by_type(type)[:num] - objgraph.show_backrefs( - objects, - max_depth=max_depth, highlight=lambda v: v in objects, - filename=fh.name, - ) - return {'filename': fh.name} - - -@Panel.register -def memsample(state, **kwargs): # pragma: no cover - from celery.utils.debug import sample_mem - return sample_mem() - - -@Panel.register -def memdump(state, samples=10, **kwargs): # pragma: no cover - from celery.utils.debug import memdump - out = io.StringIO() - memdump(file=out) - return out.getvalue() - - -@Panel.register -def clock(state, **kwargs): - return {'clock': state.app.clock.value} - - -@Panel.register -def dump_revoked(state, **kwargs): - return list(worker_state.revoked) - - -@Panel.register -def hello(state, from_node, revoked=None, **kwargs): - if from_node != state.hostname: - logger.info('sync with %s', from_node) - if revoked: - worker_state.revoked.update(revoked) - return {'revoked': worker_state.revoked._data, - 'clock': state.app.clock.forward()} - - -@Panel.register -def dump_tasks(state, taskinfoitems=None, builtins=False, **kwargs): - reg = state.app.tasks - taskinfoitems = taskinfoitems or 
DEFAULT_TASK_INFO_ITEMS - - tasks = reg if builtins else ( - task for task in reg if not task.startswith('celery.')) - - def _extract_info(task): - fields = dict((field, str(getattr(task, field, None))) - for field in taskinfoitems - if getattr(task, field, None) is not None) - if fields: - info = ['='.join(f) for f in items(fields)] - return '{0} [{1}]'.format(task.name, ' '.join(info)) - return task.name - - return [_extract_info(reg[task]) for task in sorted(tasks)] - - -@Panel.register -def ping(state, **kwargs): - return {'ok': 'pong'} - - -@Panel.register -def pool_grow(state, n=1, **kwargs): - if state.consumer.controller.autoscaler: - state.consumer.controller.autoscaler.force_scale_up(n) - else: - state.consumer.pool.grow(n) - state.consumer._update_prefetch_count(n) - return {'ok': 'pool will grow'} - - -@Panel.register -def pool_shrink(state, n=1, **kwargs): - if state.consumer.controller.autoscaler: - state.consumer.controller.autoscaler.force_scale_down(n) - else: - state.consumer.pool.shrink(n) - state.consumer._update_prefetch_count(-n) - return {'ok': 'pool will shrink'} - - -@Panel.register -def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): - if state.app.conf.CELERYD_POOL_RESTARTS: - state.consumer.controller.reload(modules, reload, reloader=reloader) - return {'ok': 'reload started'} - else: - raise ValueError('Pool restarts not enabled') - - -@Panel.register -def autoscale(state, max=None, min=None): - autoscaler = state.consumer.controller.autoscaler - if autoscaler: - max_, min_ = autoscaler.update(max, min) - return {'ok': 'autoscale now max={0} min={1}'.format(max_, min_)} - raise ValueError('Autoscale not enabled') - - -@Panel.register -def shutdown(state, msg='Got shutdown from remote', **kwargs): - logger.warning(msg) - raise WorkerShutdown(msg) - - -@Panel.register -def add_consumer(state, queue, exchange=None, exchange_type=None, - routing_key=None, **options): - state.consumer.add_task_queue(queue, exchange, exchange_type, - routing_key, **options) - return {'ok': 'add consumer {0}'.format(queue)} - - -@Panel.register -def cancel_consumer(state, queue=None, **_): - state.consumer.cancel_task_queue(queue) - return {'ok': 'no longer consuming from {0}'.format(queue)} - - -@Panel.register -def active_queues(state): - """Return information about the queues a worker consumes from.""" - if state.consumer.task_consumer: - return [dict(queue.as_dict(recurse=True)) - for queue in state.consumer.task_consumer.queues] - return [] - - -def _wanted_config_key(key): - return (isinstance(key, string_t) and - key.isupper() and - not key.startswith('__')) - - -@Panel.register -def dump_conf(state, with_defaults=False, **kwargs): - return jsonify(state.app.conf.table(with_defaults=with_defaults), - keyfilter=_wanted_config_key, - unknown_type_filter=safe_repr) - - -@Panel.register -def election(state, id, topic, action=None, **kwargs): - if state.consumer.gossip: - state.consumer.gossip.election(id, topic, action) diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py deleted file mode 100644 index cf46ab0..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.heartbeat - ~~~~~~~~~~~~~~~~~~~~~~~ - - This is the internal thread that sends heartbeat events - at regular intervals. 
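Heart, defined in the body below, is a re-armed timer rather than a literal thread: every interval seconds it fires a heartbeat event carrying worker statistics. A minimal standard-library analogue of that scheduling (SimpleHeart is an illustrative name, assuming only a send callable):

import threading

class SimpleHeart:
    def __init__(self, send, interval=2.0):
        self.send = send          # callable invoked once per beat
        self.interval = interval
        self._timer = None

    def _beat(self):
        self.send('worker-heartbeat')
        self.start()              # re-arm the timer for the next beat

    def start(self):
        self._timer = threading.Timer(self.interval, self._beat)
        self._timer.daemon = True
        self._timer.start()

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
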
- -""" -from __future__ import absolute_import - -from celery.utils.sysinfo import load_average - -from .state import SOFTWARE_INFO, active_requests, all_total_count - -__all__ = ['Heart'] - - -class Heart(object): - """Timer sending heartbeats at regular intervals. - - :param timer: Timer instance. - :param eventer: Event dispatcher used to send the event. - :keyword interval: Time in seconds between heartbeats. - Default is 2 seconds. - - """ - - def __init__(self, timer, eventer, interval=None): - self.timer = timer - self.eventer = eventer - self.interval = float(interval or 2.0) - self.tref = None - - # Make event dispatcher start/stop us when enabled/disabled. - self.eventer.on_enabled.add(self.start) - self.eventer.on_disabled.add(self.stop) - - def _send(self, event): - return self.eventer.send(event, freq=self.interval, - active=len(active_requests), - processed=all_total_count[0], - loadavg=load_average(), - **SOFTWARE_INFO) - - def start(self): - if self.eventer.enabled: - self._send('worker-online') - self.tref = self.timer.call_repeatedly( - self.interval, self._send, ('worker-heartbeat', ), - ) - - def stop(self): - if self.tref is not None: - self.timer.cancel(self.tref) - self.tref = None - if self.eventer.enabled: - self._send('worker-offline') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/job.py b/thesisenv/lib/python3.6/site-packages/celery/worker/job.py deleted file mode 100644 index 793de3d..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/job.py +++ /dev/null @@ -1,595 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.job - ~~~~~~~~~~~~~~~~~ - - This module defines the :class:`Request` class, - which specifies how tasks are executed. - -""" -from __future__ import absolute_import, unicode_literals - -import logging -import socket -import sys - -from billiard.einfo import ExceptionInfo -from datetime import datetime -from weakref import ref - -from kombu.utils import kwdict, reprcall -from kombu.utils.encoding import safe_repr, safe_str - -from celery import signals -from celery.app.trace import trace_task, trace_task_ret -from celery.exceptions import ( - Ignore, TaskRevokedError, InvalidTaskError, - SoftTimeLimitExceeded, TimeLimitExceeded, - WorkerLostError, Terminated, Retry, Reject, -) -from celery.five import items, monotonic, string, string_t -from celery.platforms import signals as _signals -from celery.utils import fun_takes_kwargs -from celery.utils.functional import noop -from celery.utils.log import get_logger -from celery.utils.serialization import get_pickled_exception -from celery.utils.text import truncate -from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware - -from . 
import state - -__all__ = ['Request'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -logger = get_logger(__name__) -debug, info, warn, error = (logger.debug, logger.info, - logger.warning, logger.error) -_does_info = False -_does_debug = False - -#: Max length of result representation -RESULT_MAXLEN = 128 - - -def __optimize__(): - # this is also called by celery.app.trace.setup_worker_optimizations - global _does_debug - global _does_info - _does_debug = logger.isEnabledFor(logging.DEBUG) - _does_info = logger.isEnabledFor(logging.INFO) -__optimize__() - -# Localize -tz_utc = timezone.utc -tz_or_local = timezone.tz_or_local -send_revoked = signals.task_revoked.send - -task_accepted = state.task_accepted -task_ready = state.task_ready -revoked_tasks = state.revoked - -NEEDS_KWDICT = sys.version_info <= (2, 6) - -#: Use when no message object passed to :class:`Request`. -DEFAULT_FIELDS = { - 'headers': None, - 'reply_to': None, - 'correlation_id': None, - 'delivery_info': { - 'exchange': None, - 'routing_key': None, - 'priority': 0, - 'redelivered': False, - }, -} - - -class Request(object): - """A request for task execution.""" - if not IS_PYPY: # pragma: no cover - __slots__ = ( - 'app', 'name', 'id', 'args', 'kwargs', 'on_ack', - 'hostname', 'eventer', 'connection_errors', 'task', 'eta', - 'expires', 'request_dict', 'acknowledged', 'on_reject', - 'utc', 'time_start', 'worker_pid', '_already_revoked', - '_terminate_on_ack', '_apply_result', - '_tzlocal', '__weakref__', '__dict__', - ) - - #: Format string used to log task success. - success_msg = """\ - Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s - """ - - #: Format string used to log task failure. - error_msg = """\ - Task %(name)s[%(id)s] %(description)s: %(exc)s - """ - - #: Format string used to log internal error. - internal_error_msg = """\ - Task %(name)s[%(id)s] %(description)s: %(exc)s - """ - - ignored_msg = """\ - Task %(name)s[%(id)s] %(description)s - """ - - rejected_msg = """\ - Task %(name)s[%(id)s] %(exc)s - """ - - #: Format string used to log task retry. - retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s""" - - def __init__(self, body, on_ack=noop, - hostname=None, eventer=None, app=None, - connection_errors=None, request_dict=None, - message=None, task=None, on_reject=noop, **opts): - self.app = app - name = self.name = body['task'] - self.id = body['id'] - self.args = body.get('args', []) - self.kwargs = body.get('kwargs', {}) - try: - self.kwargs.items - except AttributeError: - raise InvalidTaskError( - 'Task keyword arguments is not a mapping') - if NEEDS_KWDICT: - self.kwargs = kwdict(self.kwargs) - eta = body.get('eta') - expires = body.get('expires') - utc = self.utc = body.get('utc', False) - self.on_ack = on_ack - self.on_reject = on_reject - self.hostname = hostname or socket.gethostname() - self.eventer = eventer - self.connection_errors = connection_errors or () - self.task = task or self.app.tasks[name] - self.acknowledged = self._already_revoked = False - self.time_start = self.worker_pid = self._terminate_on_ack = None - self._apply_result = None - self._tzlocal = None - - # timezone means the message is timezone-aware, and the only timezone - # supported at this point is UTC. 
- if eta is not None: - try: - self.eta = maybe_iso8601(eta) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid eta value {0!r}: {1}'.format(eta, exc)) - if utc: - self.eta = maybe_make_aware(self.eta, self.tzlocal) - else: - self.eta = None - if expires is not None: - try: - self.expires = maybe_iso8601(expires) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid expires value {0!r}: {1}'.format(expires, exc)) - if utc: - self.expires = maybe_make_aware(self.expires, self.tzlocal) - else: - self.expires = None - - if message: - delivery_info = message.delivery_info or {} - properties = message.properties or {} - body.update({ - 'headers': message.headers, - 'reply_to': properties.get('reply_to'), - 'correlation_id': properties.get('correlation_id'), - 'delivery_info': { - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key'), - 'priority': properties.get( - 'priority', delivery_info.get('priority')), - 'redelivered': delivery_info.get('redelivered'), - } - - }) - else: - body.update(DEFAULT_FIELDS) - self.request_dict = body - - @property - def delivery_info(self): - return self.request_dict['delivery_info'] - - def extend_with_default_kwargs(self): - """Extend the tasks keyword arguments with standard task arguments. - - Currently these are `logfile`, `loglevel`, `task_id`, - `task_name`, `task_retries`, and `delivery_info`. - - See :meth:`celery.task.base.Task.run` for more information. - - Magic keyword arguments are deprecated and will be removed - in version 4.0. - - """ - kwargs = dict(self.kwargs) - default_kwargs = {'logfile': None, # deprecated - 'loglevel': None, # deprecated - 'task_id': self.id, - 'task_name': self.name, - 'task_retries': self.request_dict.get('retries', 0), - 'task_is_eager': False, - 'delivery_info': self.delivery_info} - fun = self.task.run - supported_keys = fun_takes_kwargs(fun, default_kwargs) - extend_with = dict((key, val) for key, val in items(default_kwargs) - if key in supported_keys) - kwargs.update(extend_with) - return kwargs - - def execute_using_pool(self, pool, **kwargs): - """Used by the worker to send this task to the pool. - - :param pool: A :class:`celery.concurrency.base.TaskPool` instance. - - :raises celery.exceptions.TaskRevokedError: if the task was revoked - and ignored. - - """ - uuid = self.id - task = self.task - if self.revoked(): - raise TaskRevokedError(uuid) - - hostname = self.hostname - kwargs = self.kwargs - if task.accept_magic_kwargs: - kwargs = self.extend_with_default_kwargs() - request = self.request_dict - request.update({'hostname': hostname, 'is_eager': False, - 'delivery_info': self.delivery_info, - 'group': self.request_dict.get('taskset')}) - timeout, soft_timeout = request.get('timelimit', (None, None)) - timeout = timeout or task.time_limit - soft_timeout = soft_timeout or task.soft_time_limit - result = pool.apply_async( - trace_task_ret, - args=(self.name, uuid, self.args, kwargs, request), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_timeout, - timeout=timeout, - correlation_id=uuid, - ) - # cannot create weakref to None - self._apply_result = ref(result) if result is not None else result - return result - - def execute(self, loglevel=None, logfile=None): - """Execute the task in a :func:`~celery.app.trace.trace_task`. - - :keyword loglevel: The loglevel used by the task. 
- :keyword logfile: The logfile used by the task. - - """ - if self.revoked(): - return - - # acknowledge task as being processed. - if not self.task.acks_late: - self.acknowledge() - - kwargs = self.kwargs - if self.task.accept_magic_kwargs: - kwargs = self.extend_with_default_kwargs() - request = self.request_dict - request.update({'loglevel': loglevel, 'logfile': logfile, - 'hostname': self.hostname, 'is_eager': False, - 'delivery_info': self.delivery_info}) - retval = trace_task(self.task, self.id, self.args, kwargs, request, - hostname=self.hostname, loader=self.app.loader, - app=self.app) - self.acknowledge() - return retval - - def maybe_expire(self): - """If expired, mark the task as revoked.""" - if self.expires: - now = datetime.now(self.expires.tzinfo) - if now > self.expires: - revoked_tasks.add(self.id) - return True - - def terminate(self, pool, signal=None): - signal = _signals.signum(signal or 'TERM') - if self.time_start: - pool.terminate_job(self.worker_pid, signal) - self._announce_revoked('terminated', True, signal, False) - else: - self._terminate_on_ack = pool, signal - if self._apply_result is not None: - obj = self._apply_result() # is a weakref - if obj is not None: - obj.terminate(signal) - - def _announce_revoked(self, reason, terminated, signum, expired): - task_ready(self) - self.send_event('task-revoked', - terminated=terminated, signum=signum, expired=expired) - if self.store_errors: - self.task.backend.mark_as_revoked(self.id, reason, request=self) - self.acknowledge() - self._already_revoked = True - send_revoked(self.task, request=self, - terminated=terminated, signum=signum, expired=expired) - - def revoked(self): - """If revoked, skip task and mark state.""" - expired = False - if self._already_revoked: - return True - if self.expires: - expired = self.maybe_expire() - if self.id in revoked_tasks: - info('Discarding revoked task: %s[%s]', self.name, self.id) - self._announce_revoked( - 'expired' if expired else 'revoked', False, None, expired, - ) - return True - return False - - def send_event(self, type, **fields): - if self.eventer and self.eventer.enabled and self.task.send_events: - self.eventer.send(type, uuid=self.id, **fields) - - def on_accepted(self, pid, time_accepted): - """Handler called when task is accepted by worker pool.""" - self.worker_pid = pid - self.time_start = time_accepted - task_accepted(self) - if not self.task.acks_late: - self.acknowledge() - self.send_event('task-started') - if _does_debug: - debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) - if self._terminate_on_ack is not None: - self.terminate(*self._terminate_on_ack) - - def on_timeout(self, soft, timeout): - """Handler called if the task times out.""" - task_ready(self) - if soft: - warn('Soft time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = SoftTimeLimitExceeded(timeout) - else: - error('Hard time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = TimeLimitExceeded(timeout) - - if self.store_errors: - self.task.backend.mark_as_failure(self.id, exc, request=self) - - if self.task.acks_late: - self.acknowledge() - - def on_success(self, ret_value, now=None, nowfun=monotonic): - """Handler called if the task was successfully processed.""" - if isinstance(ret_value, ExceptionInfo): - if isinstance(ret_value.exception, ( - SystemExit, KeyboardInterrupt)): - raise ret_value.exception - return self.on_failure(ret_value) - task_ready(self) - - if self.task.acks_late: - self.acknowledge() - - if self.eventer and 
self.eventer.enabled: - now = nowfun() - runtime = self.time_start and (now - self.time_start) or 0 - self.send_event('task-succeeded', - result=safe_repr(ret_value), runtime=runtime) - - if _does_info: - now = now or nowfun() - runtime = self.time_start and (now - self.time_start) or 0 - info(self.success_msg.strip(), { - 'id': self.id, 'name': self.name, - 'return_value': self.repr_result(ret_value), - 'runtime': runtime}) - - def on_retry(self, exc_info): - """Handler called if the task should be retried.""" - if self.task.acks_late: - self.acknowledge() - - self.send_event('task-retried', - exception=safe_repr(exc_info.exception.exc), - traceback=safe_str(exc_info.traceback)) - - if _does_info: - info(self.retry_msg.strip(), - {'id': self.id, 'name': self.name, - 'exc': exc_info.exception}) - - def on_failure(self, exc_info): - """Handler called if the task raised an exception.""" - task_ready(self) - send_failed_event = True - - if not exc_info.internal: - exc = exc_info.exception - - if isinstance(exc, Retry): - return self.on_retry(exc_info) - - # These are special cases where the process would not have had - # time to write the result. - if self.store_errors: - if isinstance(exc, WorkerLostError): - self.task.backend.mark_as_failure( - self.id, exc, request=self, - ) - elif isinstance(exc, Terminated): - self._announce_revoked( - 'terminated', True, string(exc), False) - send_failed_event = False # already sent revoked event - # (acks_late) acknowledge after result stored. - if self.task.acks_late: - self.acknowledge() - self._log_error(exc_info, send_failed_event=send_failed_event) - - def _log_error(self, einfo, send_failed_event=True): - einfo.exception = get_pickled_exception(einfo.exception) - eobj = einfo.exception - exception, traceback, exc_info, internal, sargs, skwargs = ( - safe_repr(eobj), - safe_str(einfo.traceback), - einfo.exc_info, - einfo.internal, - safe_repr(self.args), - safe_repr(self.kwargs), - ) - task = self.task - if task.throws and isinstance(eobj, task.throws): - do_send_mail, severity, exc_info, description = ( - False, logging.INFO, None, 'raised expected', - ) - else: - do_send_mail, severity, description = ( - True, logging.ERROR, 'raised unexpected', - ) - - format = self.error_msg - if internal: - if isinstance(einfo.exception, MemoryError): - raise MemoryError('Process got: %s' % (einfo.exception, )) - elif isinstance(einfo.exception, Reject): - format = self.rejected_msg - description = 'rejected' - severity = logging.WARN - send_failed_event = False - self.reject(requeue=einfo.exception.requeue) - elif isinstance(einfo.exception, Ignore): - format = self.ignored_msg - description = 'ignored' - severity = logging.INFO - exc_info = None - send_failed_event = False - self.acknowledge() - else: - format = self.internal_error_msg - description = 'INTERNAL ERROR' - severity = logging.CRITICAL - - if send_failed_event: - self.send_event( - 'task-failed', exception=exception, traceback=traceback, - ) - - context = { - 'hostname': self.hostname, - 'id': self.id, - 'name': self.name, - 'exc': exception, - 'traceback': traceback, - 'args': sargs, - 'kwargs': skwargs, - 'description': description, - } - - logger.log(severity, format.strip(), context, - exc_info=exc_info, - extra={'data': {'id': self.id, - 'name': self.name, - 'args': sargs, - 'kwargs': skwargs, - 'hostname': self.hostname, - 'internal': internal}}) - - if do_send_mail: - task.send_error_email(context, einfo.exception) - - def acknowledge(self): - """Acknowledge task.""" - if not 
self.acknowledged: - self.on_ack(logger, self.connection_errors) - self.acknowledged = True - - def reject(self, requeue=False): - if not self.acknowledged: - self.on_reject(logger, self.connection_errors, requeue) - self.acknowledged = True - - def repr_result(self, result, maxlen=RESULT_MAXLEN): - # 46 is the length needed to fit - # 'the quick brown fox jumps over the lazy dog' :) - if not isinstance(result, string_t): - result = safe_repr(result) - return truncate(result) if len(result) > maxlen else result - - def info(self, safe=False): - return {'id': self.id, - 'name': self.name, - 'args': self.args if safe else safe_repr(self.args), - 'kwargs': self.kwargs if safe else safe_repr(self.kwargs), - 'hostname': self.hostname, - 'time_start': self.time_start, - 'acknowledged': self.acknowledged, - 'delivery_info': self.delivery_info, - 'worker_pid': self.worker_pid} - - def __str__(self): - return '{0.name}[{0.id}]{1}{2}'.format( - self, - ' eta:[{0}]'.format(self.eta) if self.eta else '', - ' expires:[{0}]'.format(self.expires) if self.expires else '', - ) - shortinfo = __str__ - - def __repr__(self): - return '<{0} {1}: {2}>'.format( - type(self).__name__, self.id, - reprcall(self.name, self.args, self.kwargs)) - - @property - def tzlocal(self): - if self._tzlocal is None: - self._tzlocal = self.app.conf.CELERY_TIMEZONE - return self._tzlocal - - @property - def store_errors(self): - return (not self.task.ignore_result or - self.task.store_errors_even_if_ignored) - - @property - def task_id(self): - # XXX compat - return self.id - - @task_id.setter # noqa - def task_id(self, value): - self.id = value - - @property - def task_name(self): - # XXX compat - return self.name - - @task_name.setter # noqa - def task_name(self, value): - self.name = value - - @property - def reply_to(self): - # used by rpc backend when failures reported by parent process - return self.request_dict['reply_to'] - - @property - def correlation_id(self): - # used similarly to reply_to - return self.request_dict['correlation_id'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py b/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py deleted file mode 100644 index 8b006a8..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -celery.worker.loop -~~~~~~~~~~~~~~~~~~ - -The consumers highly-optimized inner loop. - -""" -from __future__ import absolute_import - -import socket - -from celery.bootsteps import RUN -from celery.exceptions import WorkerShutdown, WorkerTerminate, WorkerLostError -from celery.utils.log import get_logger - -from . 
import state - -__all__ = ['asynloop', 'synloop'] - -logger = get_logger(__name__) -error = logger.error - - -def asynloop(obj, connection, consumer, blueprint, hub, qos, - heartbeat, clock, hbrate=2.0, RUN=RUN): - """Non-blocking event loop consuming messages until connection is lost, - or shutdown is requested.""" - update_qos = qos.update - hbtick = connection.heartbeat_check - errors = connection.connection_errors - heartbeat = connection.get_heartbeat_interval() # negotiated - - on_task_received = obj.create_task_handler() - - if heartbeat and connection.supports_heartbeats: - hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate) - - consumer.callbacks = [on_task_received] - consumer.consume() - obj.on_ready() - obj.controller.register_with_event_loop(hub) - obj.register_with_event_loop(hub) - - # did_start_ok will verify that pool processes were able to start, - # but this will only work the first time we start, as - # maxtasksperchild will mess up metrics. - if not obj.restart_count and not obj.pool.did_start_ok(): - raise WorkerLostError('Could not start worker processes') - - # consumer.consume() may have prefetched up to our - # limit - drain an event so we are in a clean state - # prior to starting our event loop. - if connection.transport.driver_type == 'amqp': - hub.call_soon(connection.drain_events) - - # FIXME: Use loop.run_forever - # Tried and works, but no time to test properly before release. - hub.propagate_errors = errors - loop = hub.create_loop() - - try: - while blueprint.state == RUN and obj.connection: - # shutdown if signal handlers told us to. - if state.should_stop: - raise WorkerShutdown() - elif state.should_terminate: - raise WorkerTerminate() - - # We only update QoS when there is no more messages to read. - # This groups together qos calls, and makes sure that remote - # control commands will be prioritized over task messages. - if qos.prev != qos.value: - update_qos() - - try: - next(loop) - except StopIteration: - loop = hub.create_loop() - finally: - try: - hub.reset() - except Exception as exc: - error( - 'Error cleaning up after event loop: %r', exc, exc_info=1, - ) - - -def synloop(obj, connection, consumer, blueprint, hub, qos, - heartbeat, clock, hbrate=2.0, **kwargs): - """Fallback blocking event loop for transports that doesn't support AIO.""" - - on_task_received = obj.create_task_handler() - consumer.register_callback(on_task_received) - consumer.consume() - - obj.on_ready() - - while blueprint.state == RUN and obj.connection: - state.maybe_shutdown() - if qos.prev != qos.value: - qos.update() - try: - connection.drain_events(timeout=2.0) - except socket.timeout: - pass - except socket.error: - if blueprint.state == RUN: - raise diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py b/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py deleted file mode 100644 index 058edd4..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import absolute_import - -import socket -import threading - -from kombu.common import ignore_errors -from kombu.utils.encoding import safe_str - -from celery.datastructures import AttributeDict -from celery.utils.log import get_logger - -from . 
import control - -__all__ = ['Pidbox', 'gPidbox'] - -logger = get_logger(__name__) -debug, error, info = logger.debug, logger.error, logger.info - - -class Pidbox(object): - consumer = None - - def __init__(self, c): - self.c = c - self.hostname = c.hostname - self.node = c.app.control.mailbox.Node( - safe_str(c.hostname), - handlers=control.Panel.data, - state=AttributeDict(app=c.app, hostname=c.hostname, consumer=c), - ) - self._forward_clock = self.c.app.clock.forward - - def on_message(self, body, message): - # just increase clock as clients usually don't - # have a valid clock to adjust with. - self._forward_clock() - try: - self.node.handle_message(body, message) - except KeyError as exc: - error('No such control command: %s', exc) - except Exception as exc: - error('Control command error: %r', exc, exc_info=True) - self.reset() - - def start(self, c): - self.node.channel = c.connection.channel() - self.consumer = self.node.listen(callback=self.on_message) - self.consumer.on_decode_error = c.on_decode_error - - def on_stop(self): - pass - - def stop(self, c): - self.on_stop() - self.consumer = self._close_channel(c) - - def reset(self): - """Sets up the process mailbox.""" - self.stop(self.c) - self.start(self.c) - - def _close_channel(self, c): - if self.node and self.node.channel: - ignore_errors(c, self.node.channel.close) - - def shutdown(self, c): - self.on_stop() - if self.consumer: - debug('Canceling broadcast consumer...') - ignore_errors(c, self.consumer.cancel) - self.stop(self.c) - - -class gPidbox(Pidbox): - _node_shutdown = None - _node_stopped = None - _resets = 0 - - def start(self, c): - c.pool.spawn_n(self.loop, c) - - def on_stop(self): - if self._node_stopped: - self._node_shutdown.set() - debug('Waiting for broadcast thread to shutdown...') - self._node_stopped.wait() - self._node_stopped = self._node_shutdown = None - - def reset(self): - self._resets += 1 - - def _do_reset(self, c, connection): - self._close_channel(c) - self.node.channel = connection.channel() - self.consumer = self.node.listen(callback=self.on_message) - self.consumer.consume() - - def loop(self, c): - resets = [self._resets] - shutdown = self._node_shutdown = threading.Event() - stopped = self._node_stopped = threading.Event() - try: - with c.connect() as connection: - - info('pidbox: Connected to %s.', connection.as_uri()) - self._do_reset(c, connection) - while not shutdown.is_set() and c.connection: - if resets[0] < self._resets: - resets[0] += 1 - self._do_reset(c, connection) - try: - connection.drain_events(timeout=1.0) - except socket.timeout: - pass - finally: - stopped.set() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/request.py b/thesisenv/lib/python3.6/site-packages/celery/worker/request.py deleted file mode 100644 index 8a65701..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/request.py +++ /dev/null @@ -1,536 +0,0 @@ -# -*- coding: utf-8 -*- -"""This module defines the :class:`Request` class, that specifies -how tasks are executed.""" -from __future__ import absolute_import, unicode_literals - -import logging -import sys - -from datetime import datetime -from weakref import ref - -from billiard.common import TERM_SIGNAME -from kombu.utils.encoding import safe_repr, safe_str -from kombu.utils.objects import cached_property - -from celery import signals -from celery.app.trace import trace_task, trace_task_ret -from celery.exceptions import ( - Ignore, TaskRevokedError, InvalidTaskError, - SoftTimeLimitExceeded, TimeLimitExceeded, - 
WorkerLostError, Terminated, Retry, Reject, -) -from celery.five import python_2_unicode_compatible, string -from celery.platforms import signals as _signals -from celery.utils.functional import maybe, noop -from celery.utils.log import get_logger -from celery.utils.nodenames import gethostname -from celery.utils.time import maybe_iso8601, timezone, maybe_make_aware -from celery.utils.serialization import get_pickled_exception - -from . import state - -__all__ = ['Request'] - -IS_PYPY = hasattr(sys, 'pypy_version_info') - -logger = get_logger(__name__) -debug, info, warn, error = (logger.debug, logger.info, - logger.warning, logger.error) -_does_info = False -_does_debug = False - - -def __optimize__(): - # this is also called by celery.app.trace.setup_worker_optimizations - global _does_debug - global _does_info - _does_debug = logger.isEnabledFor(logging.DEBUG) - _does_info = logger.isEnabledFor(logging.INFO) -__optimize__() - -# Localize -tz_or_local = timezone.tz_or_local -send_revoked = signals.task_revoked.send - -task_accepted = state.task_accepted -task_ready = state.task_ready -revoked_tasks = state.revoked - - -@python_2_unicode_compatible -class Request(object): - """A request for task execution.""" - acknowledged = False - time_start = None - worker_pid = None - time_limits = (None, None) - _already_revoked = False - _terminate_on_ack = None - _apply_result = None - _tzlocal = None - - if not IS_PYPY: # pragma: no cover - __slots__ = ( - 'app', 'type', 'name', 'id', 'root_id', 'parent_id', - 'on_ack', 'body', 'hostname', 'eventer', 'connection_errors', - 'task', 'eta', 'expires', 'request_dict', 'on_reject', 'utc', - 'content_type', 'content_encoding', 'argsrepr', 'kwargsrepr', - '_decoded', - '__weakref__', '__dict__', - ) - - def __init__(self, message, on_ack=noop, - hostname=None, eventer=None, app=None, - connection_errors=None, request_dict=None, - task=None, on_reject=noop, body=None, - headers=None, decoded=False, utc=True, - maybe_make_aware=maybe_make_aware, - maybe_iso8601=maybe_iso8601, **opts): - if headers is None: - headers = message.headers - if body is None: - body = message.body - self.app = app - self.message = message - self.body = body - self.utc = utc - self._decoded = decoded - if decoded: - self.content_type = self.content_encoding = None - else: - self.content_type, self.content_encoding = ( - message.content_type, message.content_encoding, - ) - - self.id = headers['id'] - type = self.type = self.name = headers['task'] - self.root_id = headers.get('root_id') - self.parent_id = headers.get('parent_id') - if 'shadow' in headers: - self.name = headers['shadow'] or self.name - if 'timelimit' in headers: - self.time_limits = headers['timelimit'] - self.argsrepr = headers.get('argsrepr', '') - self.kwargsrepr = headers.get('kwargsrepr', '') - self.on_ack = on_ack - self.on_reject = on_reject - self.hostname = hostname or gethostname() - self.eventer = eventer - self.connection_errors = connection_errors or () - self.task = task or self.app.tasks[type] - - # timezone means the message is timezone-aware, and the only timezone - # supported at this point is UTC. 
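Besides the ETA, the headers carry an expiry stamp; maybe_expire() below compares it against datetime.now() evaluated in the stamp's own timezone. The comparison in isolation, under the assumption of timezone-aware datetimes:

from datetime import datetime, timedelta, timezone

def is_expired(expires):
    # Evaluate "now" in the same timezone as the expiry stamp so the
    # comparison never mixes naive and aware datetimes.
    return datetime.now(expires.tzinfo) > expires

past = datetime.now(timezone.utc) - timedelta(seconds=1)
assert is_expired(past)
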
- eta = headers.get('eta') - if eta is not None: - try: - eta = maybe_iso8601(eta) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid ETA value {0!r}: {1}'.format(eta, exc)) - self.eta = maybe_make_aware(eta, self.tzlocal) - else: - self.eta = None - - expires = headers.get('expires') - if expires is not None: - try: - expires = maybe_iso8601(expires) - except (AttributeError, ValueError, TypeError) as exc: - raise InvalidTaskError( - 'invalid expires value {0!r}: {1}'.format(expires, exc)) - self.expires = maybe_make_aware(expires, self.tzlocal) - else: - self.expires = None - - delivery_info = message.delivery_info or {} - properties = message.properties or {} - headers.update({ - 'reply_to': properties.get('reply_to'), - 'correlation_id': properties.get('correlation_id'), - 'delivery_info': { - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key'), - 'priority': properties.get('priority'), - 'redelivered': delivery_info.get('redelivered'), - } - - }) - self.request_dict = headers - - @property - def delivery_info(self): - return self.request_dict['delivery_info'] - - def execute_using_pool(self, pool, **kwargs): - """Used by the worker to send this task to the pool. - - Arguments: - pool (~celery.concurrency.base.TaskPool): The execution pool - used to execute this request. - - Raises: - celery.exceptions.TaskRevokedError: if the task was revoked. - """ - task_id = self.id - task = self.task - if self.revoked(): - raise TaskRevokedError(task_id) - - time_limit, soft_time_limit = self.time_limits - result = pool.apply_async( - trace_task_ret, - args=(self.type, task_id, self.request_dict, self.body, - self.content_type, self.content_encoding), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_time_limit or task.soft_time_limit, - timeout=time_limit or task.time_limit, - correlation_id=task_id, - ) - # cannot create weakref to None - self._apply_result = maybe(ref, result) - return result - - def execute(self, loglevel=None, logfile=None): - """Execute the task in a :func:`~celery.app.trace.trace_task`. - - Arguments: - loglevel (int): The loglevel used by the task. - logfile (str): The logfile used by the task. - """ - if self.revoked(): - return - - # acknowledge task as being processed. 
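acks_late decides whether that acknowledgement happens before the task body runs (the default, shown next) or only once the outcome is known, as in on_success()/on_failure(). A toy rendering of the two orderings:

def run_task(task, ack, acks_late=False):
    # Default: acknowledge up front; with acks_late the message is only
    # acknowledged after the task body has produced a result (or raised).
    if not acks_late:
        ack()
    try:
        return task()
    finally:
        if acks_late:
            ack()
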
- if not self.task.acks_late: - self.acknowledge() - - request = self.request_dict - args, kwargs, embed = self._payload - request.update({'loglevel': loglevel, 'logfile': logfile, - 'hostname': self.hostname, 'is_eager': False, - 'args': args, 'kwargs': kwargs}, **embed or {}) - retval = trace_task(self.task, self.id, args, kwargs, request, - hostname=self.hostname, loader=self.app.loader, - app=self.app)[0] - self.acknowledge() - return retval - - def maybe_expire(self): - """If expired, mark the task as revoked.""" - if self.expires: - now = datetime.now(self.expires.tzinfo) - if now > self.expires: - revoked_tasks.add(self.id) - return True - - def terminate(self, pool, signal=None): - signal = _signals.signum(signal or TERM_SIGNAME) - if self.time_start: - pool.terminate_job(self.worker_pid, signal) - self._announce_revoked('terminated', True, signal, False) - else: - self._terminate_on_ack = pool, signal - if self._apply_result is not None: - obj = self._apply_result() # is a weakref - if obj is not None: - obj.terminate(signal) - - def _announce_revoked(self, reason, terminated, signum, expired): - task_ready(self) - self.send_event('task-revoked', - terminated=terminated, signum=signum, expired=expired) - self.task.backend.mark_as_revoked( - self.id, reason, request=self, store_result=self.store_errors, - ) - self.acknowledge() - self._already_revoked = True - send_revoked(self.task, request=self, - terminated=terminated, signum=signum, expired=expired) - - def revoked(self): - """If revoked, skip task and mark state.""" - expired = False - if self._already_revoked: - return True - if self.expires: - expired = self.maybe_expire() - if self.id in revoked_tasks: - info('Discarding revoked task: %s[%s]', self.name, self.id) - self._announce_revoked( - 'expired' if expired else 'revoked', False, None, expired, - ) - return True - return False - - def send_event(self, type, **fields): - if self.eventer and self.eventer.enabled and self.task.send_events: - self.eventer.send(type, uuid=self.id, **fields) - - def on_accepted(self, pid, time_accepted): - """Handler called when task is accepted by worker pool.""" - self.worker_pid = pid - self.time_start = time_accepted - task_accepted(self) - if not self.task.acks_late: - self.acknowledge() - self.send_event('task-started') - if _does_debug: - debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) - if self._terminate_on_ack is not None: - self.terminate(*self._terminate_on_ack) - - def on_timeout(self, soft, timeout): - """Handler called if the task times out.""" - task_ready(self) - if soft: - warn('Soft time limit (%ss) exceeded for %s[%s]', - soft, self.name, self.id) - exc = SoftTimeLimitExceeded(soft) - else: - error('Hard time limit (%ss) exceeded for %s[%s]', - timeout, self.name, self.id) - exc = TimeLimitExceeded(timeout) - - self.task.backend.mark_as_failure( - self.id, exc, request=self, store_result=self.store_errors, - ) - - if self.task.acks_late: - self.acknowledge() - - def on_success(self, failed__retval__runtime, **kwargs): - """Handler called if the task was successfully processed.""" - failed, retval, runtime = failed__retval__runtime - if failed: - if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)): - raise retval.exception - return self.on_failure(retval, return_ok=True) - task_ready(self) - - if self.task.acks_late: - self.acknowledge() - - self.send_event('task-succeeded', result=retval, runtime=runtime) - - def on_retry(self, exc_info): - """Handler called if the task should be retried.""" - 
if self.task.acks_late: - self.acknowledge() - - self.send_event('task-retried', - exception=safe_repr(exc_info.exception.exc), - traceback=safe_str(exc_info.traceback)) - - def on_failure(self, exc_info, send_failed_event=True, return_ok=False): - """Handler called if the task raised an exception.""" - task_ready(self) - if isinstance(exc_info.exception, MemoryError): - raise MemoryError('Process got: %s' % (exc_info.exception,)) - elif isinstance(exc_info.exception, Reject): - return self.reject(requeue=exc_info.exception.requeue) - elif isinstance(exc_info.exception, Ignore): - return self.acknowledge() - - exc = exc_info.exception - - if isinstance(exc, Retry): - return self.on_retry(exc_info) - - # These are special cases where the process wouldn't've had - # time to write the result. - if isinstance(exc, Terminated): - self._announce_revoked( - 'terminated', True, string(exc), False) - send_failed_event = False # already sent revoked event - elif isinstance(exc, WorkerLostError) or not return_ok: - self.task.backend.mark_as_failure( - self.id, exc, request=self, store_result=self.store_errors, - ) - # (acks_late) acknowledge after result stored. - if self.task.acks_late: - requeue = self.delivery_info.get('redelivered', None) is False - reject = ( - self.task.reject_on_worker_lost and - isinstance(exc, WorkerLostError) - ) - if reject: - self.reject(requeue=requeue) - send_failed_event = False - else: - self.acknowledge() - - if send_failed_event: - self.send_event( - 'task-failed', - exception=safe_repr(get_pickled_exception(exc_info.exception)), - traceback=exc_info.traceback, - ) - - if not return_ok: - error('Task handler raised error: %r', exc, - exc_info=exc_info.exc_info) - - def acknowledge(self): - """Acknowledge task.""" - if not self.acknowledged: - self.on_ack(logger, self.connection_errors) - self.acknowledged = True - - def reject(self, requeue=False): - if not self.acknowledged: - self.on_reject(logger, self.connection_errors, requeue) - self.acknowledged = True - self.send_event('task-rejected', requeue=requeue) - - def info(self, safe=False): - return { - 'id': self.id, - 'name': self.name, - 'args': self.argsrepr, - 'kwargs': self.kwargsrepr, - 'type': self.type, - 'body': self.body, - 'hostname': self.hostname, - 'time_start': self.time_start, - 'acknowledged': self.acknowledged, - 'delivery_info': self.delivery_info, - 'worker_pid': self.worker_pid, - } - - def __str__(self): - return ' '.join([ - self.humaninfo(), - ' ETA:[{0}]'.format(self.eta) if self.eta else '', - ' expires:[{0}]'.format(self.expires) if self.expires else '', - ]) - - def humaninfo(self): - return '{0.name}[{0.id}]'.format(self) - - def __repr__(self): - return '<{0}: {1} {2} {3}>'.format( - type(self).__name__, self.humaninfo(), - self.argsrepr, self.kwargsrepr, - ) - - @property - def tzlocal(self): - if self._tzlocal is None: - self._tzlocal = self.app.conf.timezone - return self._tzlocal - - @property - def store_errors(self): - return (not self.task.ignore_result or - self.task.store_errors_even_if_ignored) - - @property - def task_id(self): - # XXX compat - return self.id - - @task_id.setter # noqa - def task_id(self, value): - self.id = value - - @property - def task_name(self): - # XXX compat - return self.name - - @task_name.setter # noqa - def task_name(self, value): - self.name = value - - @property - def reply_to(self): - # used by rpc backend when failures reported by parent process - return self.request_dict['reply_to'] - - @property - def correlation_id(self): - # used 
similarly to reply_to - return self.request_dict['correlation_id'] - - @cached_property - def _payload(self): - return self.body if self._decoded else self.message.payload - - @cached_property - def chord(self): - # used by backend.mark_as_failure when failure is reported - # by parent process - _, _, embed = self._payload - return embed.get('chord') - - @cached_property - def errbacks(self): - # used by backend.mark_as_failure when failure is reported - # by parent process - _, _, embed = self._payload - return embed.get('errbacks') - - @cached_property - def group(self): - # used by backend.on_chord_part_return when failures reported - # by parent process - return self.request_dict['group'] - - -def create_request_cls(base, task, pool, hostname, eventer, - ref=ref, revoked_tasks=revoked_tasks, - task_ready=task_ready): - from celery.app.trace import trace_task_ret as trace - default_time_limit = task.time_limit - default_soft_time_limit = task.soft_time_limit - apply_async = pool.apply_async - acks_late = task.acks_late - events = eventer and eventer.enabled - - class Request(base): - - def execute_using_pool(self, pool, **kwargs): - task_id = self.id - if (self.expires or task_id in revoked_tasks) and self.revoked(): - raise TaskRevokedError(task_id) - - time_limit, soft_time_limit = self.time_limits - result = apply_async( - trace, - args=(self.type, task_id, self.request_dict, self.body, - self.content_type, self.content_encoding), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=soft_time_limit or default_soft_time_limit, - timeout=time_limit or default_time_limit, - correlation_id=task_id, - ) - # cannot create weakref to None - self._apply_result = maybe(ref, result) - return result - - def on_success(self, failed__retval__runtime, **kwargs): - failed, retval, runtime = failed__retval__runtime - if failed: - if isinstance(retval.exception, ( - SystemExit, KeyboardInterrupt)): - raise retval.exception - return self.on_failure(retval, return_ok=True) - task_ready(self) - - if acks_late: - self.acknowledge() - - if events: - self.send_event( - 'task-succeeded', result=retval, runtime=runtime, - ) - - return Request diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/state.py b/thesisenv/lib/python3.6/site-packages/celery/worker/state.py deleted file mode 100644 index 1aa4cbc..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/state.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.state - ~~~~~~~~~~~~~~~~~~~ - - Internal worker state (global) - - This includes the currently active and reserved tasks, - statistics, and revoked tasks. - -""" -from __future__ import absolute_import - -import os -import sys -import platform -import shelve -import zlib - -from kombu.serialization import pickle, pickle_protocol -from kombu.utils import cached_property - -from celery import __version__ -from celery.datastructures import LimitedSet -from celery.exceptions import WorkerShutdown, WorkerTerminate -from celery.five import Counter - -__all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests', - 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', - 'task_accepted', 'task_ready', 'task_reserved', 'task_ready', - 'Persistent'] - -#: Worker software/platform information. -SOFTWARE_INFO = {'sw_ident': 'py-celery', - 'sw_ver': __version__, - 'sw_sys': platform.system()} - -#: maximum number of revokes to keep in memory. 
-REVOKES_MAX = 50000 - -#: how many seconds a revoke will be active before -#: being expired when the max limit has been exceeded. -REVOKE_EXPIRES = 10800 - -#: set of all reserved :class:`~celery.worker.job.Request`'s. -reserved_requests = set() - -#: set of currently active :class:`~celery.worker.job.Request`'s. -active_requests = set() - -#: count of tasks accepted by the worker, sorted by type. -total_count = Counter() - -#: count of all tasks accepted by the worker -all_total_count = [0] - -#: the list of currently revoked tasks. Persistent if statedb set. -revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) - -#: Update global state when a task has been reserved. -task_reserved = reserved_requests.add - -should_stop = False -should_terminate = False - - -def reset_state(): - reserved_requests.clear() - active_requests.clear() - total_count.clear() - all_total_count[:] = [0] - revoked.clear() - - -def maybe_shutdown(): - if should_stop: - raise WorkerShutdown() - elif should_terminate: - raise WorkerTerminate() - - -def task_accepted(request, _all_total_count=all_total_count): - """Updates global state when a task has been accepted.""" - active_requests.add(request) - total_count[request.name] += 1 - all_total_count[0] += 1 - - -def task_ready(request): - """Updates global state when a task is ready.""" - active_requests.discard(request) - reserved_requests.discard(request) - - -C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH') -C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or - os.environ.get('CELERY_BENCH_EVERY') or 1000) -if C_BENCH: # pragma: no cover - import atexit - - from billiard import current_process - from celery.five import monotonic - from celery.utils.debug import memdump, sample_mem - - all_count = 0 - bench_first = None - bench_start = None - bench_last = None - bench_every = C_BENCH_EVERY - bench_sample = [] - __reserved = task_reserved - __ready = task_ready - - if current_process()._name == 'MainProcess': - @atexit.register - def on_shutdown(): - if bench_first is not None and bench_last is not None: - print('- Time spent in benchmark: {0!r}'.format( - bench_last - bench_first)) - print('- Avg: {0}'.format( - sum(bench_sample) / len(bench_sample))) - memdump() - - def task_reserved(request): # noqa - global bench_start - global bench_first - now = None - if bench_start is None: - bench_start = now = monotonic() - if bench_first is None: - bench_first = now - - return __reserved(request) - - def task_ready(request): # noqa - global all_count - global bench_start - global bench_last - all_count += 1 - if not all_count % bench_every: - now = monotonic() - diff = now - bench_start - print('- Time spent processing {0} tasks (since first ' - 'task received): ~{1:.4f}s\n'.format(bench_every, diff)) - sys.stdout.flush() - bench_start = bench_last = now - bench_sample.append(diff) - sample_mem() - return __ready(request) - - -class Persistent(object): - """This is the persistent data stored by the worker when - :option:`--statedb` is enabled. - - It currently only stores revoked task id's. 
- - """ - storage = shelve - protocol = pickle_protocol - compress = zlib.compress - decompress = zlib.decompress - _is_open = False - - def __init__(self, state, filename, clock=None): - self.state = state - self.filename = filename - self.clock = clock - self.merge() - - def open(self): - return self.storage.open( - self.filename, protocol=self.protocol, writeback=True, - ) - - def merge(self): - self._merge_with(self.db) - - def sync(self): - self._sync_with(self.db) - self.db.sync() - - def close(self): - if self._is_open: - self.db.close() - self._is_open = False - - def save(self): - self.sync() - self.close() - - def _merge_with(self, d): - self._merge_revoked(d) - self._merge_clock(d) - return d - - def _sync_with(self, d): - self._revoked_tasks.purge() - d.update( - __proto__=3, - zrevoked=self.compress(self._dumps(self._revoked_tasks)), - clock=self.clock.forward() if self.clock else 0, - ) - return d - - def _merge_clock(self, d): - if self.clock: - d['clock'] = self.clock.adjust(d.get('clock') or 0) - - def _merge_revoked(self, d): - try: - self._merge_revoked_v3(d['zrevoked']) - except KeyError: - try: - self._merge_revoked_v2(d.pop('revoked')) - except KeyError: - pass - # purge expired items at boot - self._revoked_tasks.purge() - - def _merge_revoked_v3(self, zrevoked): - if zrevoked: - self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) - - def _merge_revoked_v2(self, saved): - if not isinstance(saved, LimitedSet): - # (pre 3.0.18) used to be stored as a dict - return self._merge_revoked_v1(saved) - self._revoked_tasks.update(saved) - - def _merge_revoked_v1(self, saved): - add = self._revoked_tasks.add - for item in saved: - add(item) - - def _dumps(self, obj): - return pickle.dumps(obj, protocol=self.protocol) - - @property - def _revoked_tasks(self): - return self.state.revoked - - @cached_property - def db(self): - self._is_open = True - return self.open() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py b/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py deleted file mode 100644 index da69b43..0000000 --- a/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.strategy - ~~~~~~~~~~~~~~~~~~~~~~ - - Task execution strategy (optimization). 
- -""" -from __future__ import absolute_import - -import logging - -from kombu.async.timer import to_timestamp -from kombu.utils.encoding import safe_repr - -from celery.utils.log import get_logger -from celery.utils.timeutils import timezone - -from .job import Request -from .state import task_reserved - -__all__ = ['default'] - -logger = get_logger(__name__) - - -def default(task, app, consumer, - info=logger.info, error=logger.error, task_reserved=task_reserved, - to_system_tz=timezone.to_system): - Req = Request - hostname = consumer.hostname - connection_errors = consumer.connection_errors - _does_info = logger.isEnabledFor(logging.INFO) - - # task event related - # (optimized to avoid calling request.send_event) - eventer = consumer.event_dispatcher - events = eventer and eventer.enabled - send_event = eventer.send - task_sends_events = events and task.send_events - - call_at = consumer.timer.call_at - apply_eta_task = consumer.apply_eta_task - rate_limits_enabled = not consumer.disable_rate_limits - get_bucket = consumer.task_buckets.__getitem__ - handle = consumer.on_task_request - limit_task = consumer._limit_task - - def task_message_handler(message, body, ack, reject, callbacks, - to_timestamp=to_timestamp): - req = Req(body, on_ack=ack, on_reject=reject, - app=app, hostname=hostname, - eventer=eventer, task=task, - connection_errors=connection_errors, - message=message) - if req.revoked(): - return - - if _does_info: - info('Received task: %s', req) - - if task_sends_events: - send_event( - 'task-received', - uuid=req.id, name=req.name, - args=safe_repr(req.args), kwargs=safe_repr(req.kwargs), - retries=req.request_dict.get('retries', 0), - eta=req.eta and req.eta.isoformat(), - expires=req.expires and req.expires.isoformat(), - ) - - if req.eta: - try: - if req.utc: - eta = to_timestamp(to_system_tz(req.eta)) - else: - eta = to_timestamp(req.eta, timezone.local) - except OverflowError as exc: - error("Couldn't convert eta %s to timestamp: %r. 
Task: %r", - req.eta, exc, req.info(safe=True), exc_info=True) - req.acknowledge() - else: - consumer.qos.increment_eventually() - call_at(eta, apply_eta_task, (req, ), priority=6) - else: - if rate_limits_enabled: - bucket = get_bucket(task.name) - if bucket: - return limit_task(req, bucket, 1) - task_reserved(req) - if callbacks: - [callback() for callback in callbacks] - handle(req) - - return task_message_handler diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py b/thesisenv/lib/python3.6/site-packages/django_common/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_common/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_common/admin.py b/thesisenv/lib/python3.6/site-packages/django_common/admin.py new file mode 100644 index 0000000..64a48b9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/admin.py @@ -0,0 +1,460 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.db import models +from django.views.decorators.csrf import csrf_protect +from django.utils.decorators import method_decorator +from django.contrib.admin.options import BaseModelAdmin, ModelAdmin +from django.contrib.admin.helpers import AdminForm +from django.core.exceptions import PermissionDenied +from django.http import Http404 +from django.utils.translation import ugettext as _ +from django.utils.html import escape +from django.forms.formsets import all_valid +from django.contrib.admin import helpers +from django.utils.safestring import mark_safe +from django.forms.models import (inlineformset_factory, BaseInlineFormSet) +from django import forms +from django.utils.functional import curry + +from django_common.compat import (atomic_decorator, force_unicode, + unquote, flatten_fieldsets) + + +csrf_protect_m = method_decorator(csrf_protect) + + +def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None): + """ + Monkey-patch for django 1.5 + """ + def normalize_fieldsets(fieldsets): + """ + Make sure the keys in fieldset dictionaries are strings. Returns the + normalized data. + """ + result = [] + + for name, options in fieldsets: + result.append((name, normalize_dictionary(options))) + + return result + + def normalize_dictionary(data_dict): + """ + Converts all the keys in "data_dict" to strings. The keys must be + convertible using str(). 
+ """ + for key, value in data_dict.items(): + if not isinstance(key, str): + del data_dict[key] + data_dict[str(key)] = value + + return data_dict + + if isinstance(prepopulated_fields, list): + prepopulated_fields = dict() + + self.form, self.fieldsets = form, normalize_fieldsets(fieldsets) + self.prepopulated_fields = [{ + 'field': form[field_name], + 'dependencies': [form[f] for f in dependencies] + } for field_name, dependencies in prepopulated_fields.items()] + + self.model_admin = model_admin + + if readonly_fields is None: + readonly_fields = () + + self.readonly_fields = readonly_fields + +AdminForm.__init__ = __init__ + + +class NestedModelAdmin(ModelAdmin): + + @csrf_protect_m + @atomic_decorator + def add_view(self, request, form_url='', extra_context=None): + """The 'add' admin view for this model.""" + model = self.model + opts = model._meta + + if not self.has_add_permission(request): + raise PermissionDenied + + ModelForm = self.get_form(request) + formsets = [] + + if request.method == 'POST': + form = ModelForm(request.POST, request.FILES) + + if form.is_valid(): + new_object = self.save_form(request, form, change=False) + form_validated = True + else: + form_validated = False + new_object = self.model() + + prefixes = {} + + for FormSet, inline in zip(self.get_formsets(request), + self.get_inline_instances(request)): + prefix = FormSet.get_default_prefix() + prefixes[prefix] = prefixes.get(prefix, 0) + 1 + + if prefixes[prefix] != 1: + prefix = "{0}-{1}".format(prefix, prefixes[prefix]) + + formset = FormSet(data=request.POST, files=request.FILES, + instance=new_object, + save_as_new="_saveasnew" in request.POST, + prefix=prefix, queryset=inline.queryset(request)) + + formsets.append(formset) + + for inline in self.get_inline_instances(request): + # If this is the inline that matches this formset, and + # we have some nested inlines to deal with, then we need + # to get the relevant formset for each of the forms in + # the current formset. + if inline.inlines and inline.model == formset.model: + for nested in inline.inline_instances: + for the_form in formset.forms: + InlineFormSet = nested.get_formset(request, the_form.instance) + prefix = "{0}-{1}".format(the_form.prefix, + InlineFormSet.get_default_prefix()) + formsets.append(InlineFormSet(request.POST, request.FILES, + instance=the_form.instance, + prefix=prefix)) + if all_valid(formsets) and form_validated: + self.save_model(request, new_object, form, change=False) + form.save_m2m() + + for formset in formsets: + self.save_formset(request, form, formset, change=False) + + self.log_addition(request, new_object) + + return self.response_add(request, new_object) + else: + # Prepare the dict of initial data from the request. + # We have to special-case M2Ms as a list of comma-separated PKs. 
+ initial = dict(request.GET.items()) + + for k in initial: + try: + f = opts.get_field(k) + except models.FieldDoesNotExist: + continue + + if isinstance(f, models.ManyToManyField): + initial[k] = initial[k].split(",") + + form = ModelForm(initial=initial) + prefixes = {} + + for FormSet, inline in zip(self.get_formsets(request), + self.get_inline_instances(request)): + prefix = FormSet.get_default_prefix() + prefixes[prefix] = prefixes.get(prefix, 0) + 1 + + if prefixes[prefix] != 1: + prefix = "{0}-{1}".format(prefix, prefixes[prefix]) + + formset = FormSet(instance=self.model(), prefix=prefix, + queryset=inline.queryset(request)) + formsets.append(formset) + + adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)), + self.prepopulated_fields, self.get_readonly_fields(request), + model_admin=self) + + media = self.media + adminForm.media + inline_admin_formsets = [] + + for inline, formset in zip(self.get_inline_instances(request), formsets): + fieldsets = list(inline.get_fieldsets(request)) + readonly = list(inline.get_readonly_fields(request)) + inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, + fieldsets, readonly, + model_admin=self) + if inline.inlines: + for form in formset.forms: + if form.instance.pk: + instance = form.instance + else: + instance = None + + form.inlines = inline.get_inlines(request, instance, prefix=form.prefix) + + inline_admin_formset.inlines = inline.get_inlines(request) + + inline_admin_formsets.append(inline_admin_formset) + media = media + inline_admin_formset.media + + context = { + 'title': _('Add %s') % force_unicode(opts.verbose_name), + 'adminform': adminForm, + 'is_popup': "_popup" in request.REQUEST, + 'show_delete': False, + 'media': mark_safe(media), + 'inline_admin_formsets': inline_admin_formsets, + 'errors': helpers.AdminErrorList(form, formsets), + 'app_label': opts.app_label, + } + + context.update(extra_context or {}) + + return self.render_change_form(request, context, form_url=form_url, add=True) + + @csrf_protect_m + @atomic_decorator + def change_view(self, request, object_id, extra_context=None, **kwargs): + "The 'change' admin view for this model." 
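# How these classes are meant to be wired together (the models and admin
# classes below are illustrative, not part of this module): a
# NestedTabularInline may itself declare `inlines`, which is exactly what the
# nested-formset plumbing in add_view/change_view exists to support.
#
#     class CommentInline(NestedTabularInline):
#         model = Comment
#
#     class PostInline(NestedTabularInline):
#         model = Post
#         inlines = [CommentInline]
#
#     class BlogAdmin(NestedModelAdmin):
#         inlines = [PostInline]
#
#     admin.site.register(Blog, BlogAdmin)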
+ model = self.model + opts = model._meta + obj = self.get_object(request, unquote(object_id)) + + if not self.has_change_permission(request, obj): + raise PermissionDenied + + if obj is None: + raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % + {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)}) + + if request.method == 'POST' and "_saveasnew" in request.POST: + return self.add_view(request, form_url='../add/') + + ModelForm = self.get_form(request, obj) + formsets = [] + + if request.method == 'POST': + form = ModelForm(request.POST, request.FILES, instance=obj) + + if form.is_valid(): + form_validated = True + new_object = self.save_form(request, form, change=True) + else: + form_validated = False + new_object = obj + + prefixes = {} + + for FormSet, inline in zip(self.get_formsets(request, new_object), + self.get_inline_instances(request)): + prefix = FormSet.get_default_prefix() + prefixes[prefix] = prefixes.get(prefix, 0) + 1 + + if prefixes[prefix] != 1: + prefix = "{0}-{1}".format(prefix, prefixes[prefix]) + formset = FormSet(request.POST, request.FILES, + instance=new_object, prefix=prefix, + queryset=inline.queryset(request)) + + formsets.append(formset) + + for inline in self.get_inline_instances(request): + # If this is the inline that matches this formset, and + # we have some nested inlines to deal with, then we need + # to get the relevant formset for each of the forms in + # the current formset. + if inline.inlines and inline.model == formset.model: + for nested in inline.inline_instances: + for the_form in formset.forms: + InlineFormSet = nested.get_formset(request, the_form.instance) + prefix = "{0}-{1}".format(the_form.prefix, + InlineFormSet.get_default_prefix()) + formsets.append(InlineFormSet(request.POST, request.FILES, + instance=the_form.instance, + prefix=prefix)) + if all_valid(formsets) and form_validated: + self.save_model(request, new_object, form, change=True) + form.save_m2m() + + for formset in formsets: + self.save_formset(request, form, formset, change=True) + + change_message = self.construct_change_message(request, form, formsets) + self.log_change(request, new_object, change_message) + + return self.response_change(request, new_object) + + else: + form = ModelForm(instance=obj) + prefixes = {} + + for FormSet, inline in zip(self.get_formsets(request, obj), + self.get_inline_instances(request)): + prefix = FormSet.get_default_prefix() + prefixes[prefix] = prefixes.get(prefix, 0) + 1 + if prefixes[prefix] != 1: + prefix = "{0}-{1}".format(prefix, prefixes[prefix]) + formset = FormSet(instance=obj, prefix=prefix, + queryset=inline.queryset(request)) + formsets.append(formset) + + adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj), + self.prepopulated_fields, + self.get_readonly_fields(request, obj), + model_admin=self) + media = self.media + adminForm.media + inline_admin_formsets = [] + + for inline, formset in zip(self.get_inline_instances(request), formsets): + fieldsets = list(inline.get_fieldsets(request, obj)) + readonly = list(inline.get_readonly_fields(request, obj)) + inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets, + readonly, model_admin=self) + if inline.inlines: + for form in formset.forms: + if form.instance.pk: + instance = form.instance + else: + instance = None + + form.inlines = inline.get_inlines(request, instance, prefix=form.prefix) + + inline_admin_formset.inlines = inline.get_inlines(request) + + 
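# The chained prefixes built for nested formsets keep each nested form
# unambiguous; illustratively (names assumed, not from this module):
#
#     '{0}-{1}'.format('post_set-0', 'comment_set')  # -> 'post_set-0-comment_set'
#
# so a nested comment form renders fields such as
# 'post_set-0-comment_set-0-body', which cannot collide with a sibling's.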
inline_admin_formsets.append(inline_admin_formset) + media = media + inline_admin_formset.media + + context = { + 'title': _('Change %s') % force_unicode(opts.verbose_name), + 'adminform': adminForm, + 'object_id': object_id, + 'original': obj, + 'is_popup': "_popup" in request.REQUEST, + 'media': mark_safe(media), + 'inline_admin_formsets': inline_admin_formsets, + 'errors': helpers.AdminErrorList(form, formsets), + 'app_label': opts.app_label, + } + + context.update(extra_context or {}) + + return self.render_change_form(request, context, change=True, obj=obj) + + def get_inlines(self, request, obj=None, prefix=None): + nested_inlines = [] + + for inline in self.get_inline_instances(request): + FormSet = inline.get_formset(request, obj) + prefix = "{0}-{1}".format(prefix, FormSet.get_default_prefix()) + formset = FormSet(instance=obj, prefix=prefix) + fieldsets = list(inline.get_fieldsets(request, obj)) + nested_inline = helpers.InlineAdminFormSet(inline, formset, fieldsets) + nested_inlines.append(nested_inline) + + return nested_inlines + + +class NestedTabularInline(BaseModelAdmin): + """ + Options for inline editing of ``model`` instances. + + Provide ``name`` to specify the attribute name of the ``ForeignKey`` from + ``model`` to its parent. This is required if ``model`` has more than one + ``ForeignKey`` to its parent. + """ + model = None + fk_name = None + formset = BaseInlineFormSet + extra = 3 + max_num = None + template = None + verbose_name = None + verbose_name_plural = None + can_delete = True + template = 'common/admin/nested_tabular.html' + inlines = [] + + def __init__(self, parent_model, admin_site): + self.admin_site = admin_site + self.parent_model = parent_model + self.opts = self.model._meta + super(NestedTabularInline, self).__init__() + + if self.verbose_name is None: + self.verbose_name = self.model._meta.verbose_name + + if self.verbose_name_plural is None: + self.verbose_name_plural = self.model._meta.verbose_name_plural + + self.inline_instances = [] + + for inline_class in self.inlines: + inline_instance = inline_class(self.model, self.admin_site) + self.inline_instances.append(inline_instance) + + def _media(self): + from django.conf import settings + + js = ['js/jquery.min.js', 'js/jquery.init.js', 'js/inlines.min.js'] + + if self.prepopulated_fields: + js.append('js/urlify.js') + js.append('js/prepopulate.min.js') + + if self.filter_vertical or self.filter_horizontal: + js.extend(['js/SelectBox.js', 'js/SelectFilter2.js']) + + return forms.Media(js=['{0}{1}'.format(settings.ADMIN_MEDIA_PREFIX, url) for url in js]) + + media = property(_media) + + def get_formset(self, request, obj=None, **kwargs): + """ + Returns a BaseInlineFormSet class for use in admin add/change views. 
+ """ + if self.declared_fieldsets: + fields = flatten_fieldsets(self.declared_fieldsets) + else: + fields = None + if self.exclude is None: + exclude = [] + else: + exclude = list(self.exclude) + + exclude.extend(kwargs.get("exclude", [])) + exclude.extend(self.get_readonly_fields(request, obj)) + + # if exclude is an empty list we use None, since that's the actual + # default + exclude = exclude or None + defaults = { + "form": self.form, + "formset": self.formset, + "fk_name": self.fk_name, + "fields": fields, + "exclude": exclude, + "formfield_callback": curry(self.formfield_for_dbfield, request=request), + "extra": self.extra, + "max_num": self.max_num, + "can_delete": self.can_delete, + } + defaults.update(kwargs) + + return inlineformset_factory(self.parent_model, self.model, **defaults) + + def get_fieldsets(self, request, obj=None): + if self.declared_fieldsets: + return self.declared_fieldsets + + form = self.get_formset(request).form + fields = form.base_fields.keys() + list(self.get_readonly_fields(request, obj)) + + return [(None, {'fields': fields})] + + def get_inlines(self, request, obj=None, prefix=None): + nested_inlines = [] + + for inline in self.inline_instances: + FormSet = inline.get_formset(request, obj) + prefix = "{0}-{1}".format(prefix, FormSet.get_default_prefix()) + formset = FormSet(instance=obj, prefix=prefix) + fieldsets = list(inline.get_fieldsets(request, obj)) + nested_inline = helpers.InlineAdminFormSet(inline, formset, fieldsets) + nested_inlines.append(nested_inline) + + return nested_inlines diff --git a/thesisenv/lib/python3.6/site-packages/django_common/auth_backends.py b/thesisenv/lib/python3.6/site-packages/django_common/auth_backends.py new file mode 100644 index 0000000..49e5901 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/auth_backends.py @@ -0,0 +1,23 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +import logging + +from django.contrib.auth import get_user_model +from django.contrib.auth.backends import ModelBackend + +User = get_user_model() + + +class EmailBackend(ModelBackend): + def authenticate(self, username=None, password=None, **kwargs): + """ + "username" being passed is really email address and being compared to as such. + """ + try: + user = User.objects.get(email=username) + if user.check_password(password): + return user + except (User.DoesNotExist, User.MultipleObjectsReturned): + logging.warning('Unsuccessful login attempt using username/email: {0}'.format(username)) + + return None diff --git a/thesisenv/lib/python3.6/site-packages/django_common/classmaker.py b/thesisenv/lib/python3.6/site-packages/django_common/classmaker.py new file mode 100644 index 0000000..9c91b83 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/classmaker.py @@ -0,0 +1,64 @@ +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/204197 +from __future__ import print_function, unicode_literals, with_statement, division + +import types +import inspect + +# preliminary: two utility functions + + +def skip_redundant(iterable, skipset=None): + """ + Redundant items are repeated items or items in the original skipset. 
+ """ + if skipset is None: + skipset = set() + for item in iterable: + if item not in skipset: + skipset.add(item) + yield item + + +def remove_redundant(metaclasses): + skipset = set([types.ClassType]) + for meta in metaclasses: # determines the metaclasses to be skipped + skipset.update(inspect.getmro(meta)[1:]) + return tuple(skip_redundant(metaclasses, skipset)) + + +# now the core of the module: two mutually recursive functions + +memoized_metaclasses_map = {} + + +def get_noconflict_metaclass(bases, left_metas, right_metas): + """ + Not intended to be used outside of this module, unless you know what you are doing. + """ + # make tuple of needed metaclasses in specified priority order + metas = left_metas + tuple(map(type, bases)) + right_metas + needed_metas = remove_redundant(metas) + + # return existing confict-solving meta, if any + if needed_metas in memoized_metaclasses_map: + return memoized_metaclasses_map[needed_metas] + # nope: compute, memoize and return needed conflict-solving meta + elif not needed_metas: # wee, a trivial case, happy us + meta = type + elif len(needed_metas) == 1: # another trivial case + meta = needed_metas[0] + # check for recursion, can happen i.e. for Zope ExtensionClasses + elif needed_metas == bases: + raise TypeError("Incompatible root metatypes", needed_metas) + else: # gotta work ... + metaname = '_' + ''.join([m.__name__ for m in needed_metas]) + meta = classmaker()(metaname, needed_metas, {}) + memoized_metaclasses_map[needed_metas] = meta + return meta + + +def classmaker(left_metas=(), right_metas=()): + def make_class(name, bases, adict): + metaclass = get_noconflict_metaclass(bases, left_metas, right_metas) + return metaclass(name, bases, adict) + return make_class diff --git a/thesisenv/lib/python3.6/site-packages/django_common/compat.py b/thesisenv/lib/python3.6/site-packages/django_common/compat.py new file mode 100644 index 0000000..256e4db --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/compat.py @@ -0,0 +1,33 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +import sys +from django import VERSION +from django.db import transaction +from django.utils import encoding + +PY2 = sys.version_info[0] == 2 + +# commit_on_success was removed in 1.8, use atomic +if hasattr(transaction, 'atomic'): + atomic_decorator = getattr(transaction, 'atomic') +else: + atomic_decorator = getattr(transaction, 'commit_on_success') + +# ugly hack required for Python 2/3 compat +if hasattr(encoding, 'force_unicode'): + force_unicode = encoding.force_unicode +elif hasattr(encoding, 'force_text'): + force_unicode = encoding.force_text +else: + force_unicode = lambda x: x + + +if (VERSION[0] == 1 and VERSION[1] >= 8) or VERSION[0] > 1: + from django.contrib.admin.utils import unquote, flatten_fieldsets +else: + from django.contrib.admin.util import unquote, flatten_fieldsets + +if not PY2: + string_types = (str,) +else: + string_types = (str, unicode) diff --git a/thesisenv/lib/python3.6/site-packages/django_common/context_processors.py b/thesisenv/lib/python3.6/site-packages/django_common/context_processors.py new file mode 100644 index 0000000..b2b47ab --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/context_processors.py @@ -0,0 +1,14 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.conf import settings as django_settings +from django_common.session import SessionManager + + +def common_settings(request): + return { + 
'domain_name': django_settings.DOMAIN_NAME, + 'www_root': django_settings.WWW_ROOT, + 'is_dev': django_settings.IS_DEV, + 'is_prod': django_settings.IS_PROD, + 'usertime': SessionManager(request).get_usertime() + } diff --git a/thesisenv/lib/python3.6/site-packages/django_common/db_fields.py b/thesisenv/lib/python3.6/site-packages/django_common/db_fields.py new file mode 100644 index 0000000..f844a08 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/db_fields.py @@ -0,0 +1,266 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +import binascii +import random +import string + +from django.db.models import fields +from django.template.defaultfilters import slugify +from django.db import models +from django.core.serializers.json import DjangoJSONEncoder +try: + import json +except ImportError: + from django.utils import simplejson as json + + +from django import forms +from django.conf import settings + +from django_common.compat import string_types +from django_common.helper import md5_hash + + +class JSONField(models.TextField): + """ + JSONField is a generic textfield that neatly serializes/unserializes JSON objects seamlessly + """ + + def from_db_value(self, value, expression, connection, context): + return self.to_python(value) + + def to_python(self, value): + """Convert our string value to JSON after we load it from the DB""" + + if value == "": + return None + + try: + if isinstance(value, string_types): + return json.loads(value) + except ValueError: + pass + + return value + + def get_prep_value(self, value): + """Convert our JSON object to a string before we save""" + + if value == "": + return None + + if isinstance(value, dict): + value = json.dumps(value, cls=DjangoJSONEncoder) + + return value + + +class UniqueSlugField(fields.SlugField): + """ + Represents a self-managing sluf field, that makes sure that the slug value is unique on + the db table. Slugs by default get a db_index on them. The "Unique" in the class name is + a misnomer since it does support unique=False + + @requires "prepopulate_from" in the constructor. 
This could be a field or a function in the + model class which is using this field + + Defaults update_on_save to False + + Taken and edited from: http://www.djangosnippets.org/snippets/728/ + """ + def __init__(self, prepopulate_from='id', *args, **kwargs): + if kwargs.get('update_on_save'): + self.__update_on_save = kwargs.pop('update_on_save') + else: + self.__update_on_save = False + self.prepopulate_from = prepopulate_from + super(UniqueSlugField, self).__init__(*args, **kwargs) + + def deconstruct(self): + name, path, args, kwargs = super(UniqueSlugField, self).deconstruct() + kwargs['prepopulate_from'] = self.prepopulate_from + return name, path, args, kwargs + + def pre_save(self, model_instance, add): + prepopulate_field = getattr(model_instance, self.prepopulate_from) + if callable(prepopulate_field): + prepopulate_value = prepopulate_field() + else: + prepopulate_value = prepopulate_field + + # if object has an id, and not to update on save, + # then return existig model instance's slug value + if getattr(model_instance, 'id') and not self.__update_on_save: + return getattr(model_instance, self.name) + + # if this is a previously saved object, and current + # instance's slug is same as one being proposed + if getattr(model_instance, 'id') \ + and getattr(model_instance, self.name) == slugify(prepopulate_value): + return getattr(model_instance, self.name) + + # if a unique slug is not required (not the default of course) + if not self.unique: + return self.__set_and_return(model_instance, self.name, slugify(prepopulate_value)) + + return self.__unique_slug(model_instance.__class__, model_instance, self.name, + prepopulate_value) + + def __unique_slug(self, model, model_instance, slug_field, slug_value): + orig_slug = slug = slugify(slug_value) + index = 1 + while True: + try: + model.objects.get(**{slug_field: slug}) + index += 1 + slug = orig_slug + '-' + str(index) + except model.DoesNotExist: + return self.__set_and_return(model_instance, slug_field, slug) + + def __set_and_return(self, model_instance, slug_field, slug): + setattr(model_instance, slug_field, slug) + return slug + +try: + from south.modelsinspector import add_introspection_rules + add_introspection_rules([ + ( + [UniqueSlugField], # Class(es) these apply to + [], # Positional arguments (not used) + { # Keyword argument + "prepopulate_from": ["prepopulate_from", {"default": 'id'}], + }, + ), + ], ["^django_common\.db_fields\.UniqueSlugField"]) +except ImportError: + pass + + +class RandomHashField(fields.CharField): + """ + Store a random hash for a certain model field. 
+ + @param update_on_save optional field whether to update this hash or not, + everytime the model instance is saved + """ + def __init__(self, update_on_save=False, hash_length=None, *args, **kwargs): + # TODO: args & kwargs serve no purpose but to make django evolution to work + self.update_on_save = update_on_save + self.hash_length = hash_length + super(fields.CharField, self).__init__( + max_length=128, unique=True, blank=False, null=False, db_index=True, + default=md5_hash(max_length=self.hash_length)) + + def pre_save(self, model_instance, add): + if not add and not self.update_on_save: + return getattr(model_instance, self.name) + + random_hash = md5_hash(max_length=self.hash_length) + setattr(model_instance, self.name, random_hash) + return random_hash + +try: + from south.modelsinspector import add_introspection_rules + add_introspection_rules([ + ( + [RandomHashField], # Class(es) these apply to + [], # Positional arguments (not used) + { # Keyword argument + "update_on_save": ["update_on_save", {"default": False}], + "hash_length": ["hash_length", {"default": None}], + }, + ), + ], ["^django_common\.db_fields\.RandomHashField"]) +except ImportError: + pass + + +class BaseEncryptedField(models.Field): + """ + This code is based on the djangosnippet #1095 + You can find the original at http://www.djangosnippets.org/snippets/1095/ + """ + def __init__(self, *args, **kwargs): + cipher = kwargs.pop('cipher', 'AES') + imp = __import__('Crypto.Cipher', globals(), locals(), [bytes(cipher)], -1) + self.cipher = getattr(imp, cipher).new(settings.SECRET_KEY[:32]) + self.prefix = '${0}$'.format(cipher) + + max_length = kwargs.get('max_length', 40) + mod = max_length % self.cipher.block_size + if mod > 0: + max_length += self.cipher.block_size - mod + kwargs['max_length'] = max_length * 2 + len(self.prefix) + + models.Field.__init__(self, *args, **kwargs) + + def _is_encrypted(self, value): + return isinstance(value, string_types) and value.startswith(self.prefix) + + def _get_padding(self, value): + mod = len(value) % self.cipher.block_size + if mod > 0: + return self.cipher.block_size - mod + return 0 + + def to_python(self, value): + if self._is_encrypted(value): + return self.cipher.decrypt(binascii.a2b_hex(value[len(self.prefix):])).split('\0')[0] + return value + + def get_db_prep_value(self, value, connection=None, prepared=None): + if value is not None and not self._is_encrypted(value): + padding = self._get_padding(value) + if padding > 0: + suffix = [random.choice(string.printable) for _ in range(padding - 1)] + value += "\0" + ''.join(suffix) + value = self.prefix + binascii.b2a_hex(self.cipher.encrypt(value)) + return value + + +class EncryptedTextField(BaseEncryptedField): + + def from_db_value(self, value, expression, connection, context): + return self.to_python(value) + + def get_internal_type(self): + return 'TextField' + + def formfield(self, **kwargs): + defaults = {'widget': forms.Textarea} + defaults.update(kwargs) + return super(EncryptedTextField, self).formfield(**defaults) + +try: + from south.modelsinspector import add_introspection_rules + add_introspection_rules([ + ( + [EncryptedTextField], [], {}, + ), + ], ["^django_common\.db_fields\.EncryptedTextField"]) +except ImportError: + pass + + +class EncryptedCharField(BaseEncryptedField): + + def from_db_value(self, value, expression, connection, context): + return self.to_python(value) + + def get_internal_type(self): + return "CharField" + + def formfield(self, **kwargs): + defaults = {'max_length': 
self.max_length} + defaults.update(kwargs) + return super(EncryptedCharField, self).formfield(**defaults) + +try: + from south.modelsinspector import add_introspection_rules + add_introspection_rules([ + ( + [EncryptedCharField], [], {}, + ), + ], ["^django_common\.db_fields\.EncryptedCharField"]) +except ImportError: + pass diff --git a/thesisenv/lib/python3.6/site-packages/django_common/decorators.py b/thesisenv/lib/python3.6/site-packages/django_common/decorators.py new file mode 100644 index 0000000..c9949af --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/decorators.py @@ -0,0 +1,59 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +try: + from functools import wraps +except ImportError: + from django.utils.functional import wraps + +import inspect + +from django.conf import settings +from django.http import HttpResponseRedirect + + +def ssl_required(allow_non_ssl=False): + """ + Views decorated with this will always get redirected to https + except when allow_non_ssl is set to true. + """ + def wrapper(view_func): + def _checkssl(request, *args, **kwargs): + # allow_non_ssl=True lets non-https requests to come + # through to this view (and hence not redirect) + if hasattr(settings, 'SSL_ENABLED') and settings.SSL_ENABLED \ + and not request.is_secure() and not allow_non_ssl: + return HttpResponseRedirect( + request.build_absolute_uri().replace('http://', 'https://')) + return view_func(request, *args, **kwargs) + + return _checkssl + return wrapper + + +def disable_for_loaddata(signal_handler): + """ + See: https://code.djangoproject.com/ticket/8399 + Disables signal from firing if its caused because of loaddata + """ + @wraps(signal_handler) + def wrapper(*args, **kwargs): + for fr in inspect.stack(): + if inspect.getmodulename(fr[1]) == 'loaddata': + return + signal_handler(*args, **kwargs) + return wrapper + + +def anonymous_required(view, redirect_to=None): + """ + Only allow if user is NOT authenticated. + """ + if redirect_to is None: + redirect_to = settings.LOGIN_REDIRECT_URL + + @wraps(view) + def wrapper(request, *a, **k): + if request.user and request.user.is_authenticated(): + return HttpResponseRedirect(redirect_to) + return view(request, *a, **k) + return wrapper diff --git a/thesisenv/lib/python3.6/site-packages/django_common/email_backends.py b/thesisenv/lib/python3.6/site-packages/django_common/email_backends.py new file mode 100644 index 0000000..5617304 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/email_backends.py @@ -0,0 +1,70 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +import os + +from django.conf import settings + +from django.core.mail.backends.smtp import EmailBackend +from django.core.mail.backends.filebased import EmailBackend as FileEmailBackend +from django.core.mail import message + + +class TestEmailBackend(EmailBackend): + """ + Email Backend to overwrite TO, CC and BCC in all outgoing emails to custom + values. 
+    Sample values from settings.py:
+        EMAIL_BACKEND = 'django_common.email_backends.TestEmailBackend'
+        TEST_EMAIL_TO = ['dev@tivix.com']  # default is the addresses from ADMINS
+        TEST_EMAIL_CC = ['dev-cc@tivix.com']  # default is empty list
+        TEST_EMAIL_BCC = ['dev-bcc@tivix.com']  # default is empty list
+    """
+
+    def _send(self, email_message):
+        """A helper method that does the actual sending."""
+        if not email_message.recipients():
+            return False
+        from_email = email_message.from_email
+        if hasattr(message, 'sanitize_address'):
+            from_email = message.sanitize_address(email_message.from_email,
+                                                  email_message.encoding)
+        if hasattr(settings, 'TEST_EMAIL_TO'):
+            email_message.to = settings.TEST_EMAIL_TO
+        else:
+            email_message.to = dict(getattr(settings, 'ADMINS', ())).values()
+        email_message.cc = getattr(settings, 'TEST_EMAIL_CC', [])
+        email_message.bcc = getattr(settings, 'TEST_EMAIL_BCC', [])
+        if hasattr(message, 'sanitize_address'):
+            recipients = [message.sanitize_address(addr, email_message.encoding)
+                          for addr in email_message.recipients()]
+        else:
+            recipients = email_message.recipients()
+        try:
+            self.connection.sendmail(from_email, recipients,
+                                     email_message.message().as_string())
+        except Exception:
+            if not self.fail_silently:
+                raise
+            return False
+        return True
+
+
+class CustomFileEmailBackend(FileEmailBackend):
+    """
+    Email Backend to save emails as files with a custom extension. It makes it
+    easier to open emails in email applications, e.g. with the eml extension
+    for Mozilla Thunderbird.
+
+    Sample values from settings.py:
+        EMAIL_BACKEND = 'django_common.email_backends.CustomFileEmailBackend'
+        EMAIL_FILE_PATH = '/email/file/path/'
+        EMAIL_FILE_EXT = 'eml'
+    """
+
+    def _get_filename(self):
+        filename = super(CustomFileEmailBackend, self)._get_filename()
+        if hasattr(settings, 'EMAIL_FILE_EXT'):
+            filename = '{0}.{1}'.format(os.path.splitext(filename)[0],
+                                        settings.EMAIL_FILE_EXT.strip('.'))
+        return filename
diff --git a/thesisenv/lib/python3.6/site-packages/django_common/helper.py b/thesisenv/lib/python3.6/site-packages/django_common/helper.py
new file mode 100644
index 0000000..9367eda
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_common/helper.py
@@ -0,0 +1,232 @@
+"Some common routines that can be used throughout the code."
+from __future__ import print_function, unicode_literals, with_statement, division
+
+import hashlib
+import os
+import logging
+import datetime
+import threading
+
+try:
+    import json
+except ImportError:
+    from django.utils import simplejson as json
+from django.utils.encoding import force_text
+from django.template import Context
+from django.template.loader import get_template
+from django.core import exceptions
+
+
+class AppException(exceptions.ValidationError):
+    """
+    Base class for exceptions used in our system.
+
+    A common base class permits application code to distinguish exceptions
+    raised in our code from ones raised in libraries.
+ """ + pass + + +class InvalidContentType(AppException): + def __init__(self, file_types, msg=None): + if not msg: + msg = 'Only the following file ' \ + 'content types are permitted: {0}'.format(str(file_types)) + super(self.__class__, self).__init__(msg) + self.file_types = file_types + + +class FileTooLarge(AppException): + def __init__(self, file_size_kb, msg=None): + if not msg: + msg = 'Files may not be larger than {0} KB'.format(file_size_kb) + super(self.__class__, self).__init__(msg) + self.file_size = file_size_kb + + +def get_class(kls): + """ + Converts a string to a class. + Courtesy: + http://stackoverflow.com/q/452969/#452981 + """ + parts = kls.split('.') + module = ".".join(parts[:-1]) + m = __import__(module) + for comp in parts[1:]: + m = getattr(m, comp) + return m + + +def is_among(value, *possibilities): + """ + Ensure that the method that has been used for the request is one + of the expected ones (e.g., GET or POST). + """ + for possibility in possibilities: + if value == possibility: + return True + raise Exception('A different request value was encountered than expected: {0}'.format(value)) + + +def form_errors_serialize(form): + errors = {} + for field in form.fields.keys(): + if field in form.errors: + if form.prefix: + errors['{0}-{1}'.format(form.prefix, field)] = force_text(form.errors[field]) + else: + errors[field] = force_text(form.errors[field]) + + if form.non_field_errors(): + errors['non_field_errors'] = force_text(form.non_field_errors()) + return {'errors': errors} + + +def json_response(data=None, errors=None, success=True): + if not errors: + errors = [] + if not data: + data = {} + data.update({ + 'errors': errors, + 'success': len(errors) == 0 and success, + }) + return json.dumps(data) + + +def sha224_hash(): + return hashlib.sha224(os.urandom(224)).hexdigest() + + +def sha1_hash(): + return hashlib.sha1(os.urandom(224)).hexdigest() + + +def md5_hash(image=None, max_length=None): + # TODO: Figure out how much entropy is actually needed, and reduce the current number + # of bytes if possible if doing so will result in a performance improvement. + if max_length: + assert max_length > 0 + + ret = hashlib.md5(image or os.urandom(224)).hexdigest() + return ret if not max_length else ret[:max_length] + + +def start_thread(target, *args): + t = threading.Thread(target=target, args=args) + t.setDaemon(True) + t.start() + + +def send_mail(subject, message, from_email, recipient_emails, files=None, + html=False, reply_to=None, bcc=None, cc=None, files_manually=None): + """ + Sends email with advanced optional parameters + + To attach non-file content (e.g. content not saved on disk), use + files_manually parameter and provide list of 3 element tuples, e.g. + [('design.png', img_data, 'image/png'),] which will be passed to + email.attach(). + """ + import django.core.mail + try: + logging.debug('Sending mail to: {0}'.format(', '.join(r for r in recipient_emails))) + logging.debug('Message: {0}'.format(message)) + email = django.core.mail.EmailMessage(subject, message, from_email, recipient_emails, + bcc, cc=cc) + if html: + email.content_subtype = "html" + if files: + for file in files: + email.attach_file(file) + if files_manually: + for filename, content, mimetype in files_manually: + email.attach(filename, content, mimetype) + if reply_to: + email.extra_headers = {'Reply-To': reply_to} + email.send() + except Exception as e: + # TODO: Raise error again so that more information is included in the logs? 
+ logging.error('Error sending message [{0}] from {1} to {2} {3}'.format( + subject, from_email, recipient_emails, e)) + + +def send_mail_in_thread(subject, message, from_email, recipient_emails, files=None, html=False, + reply_to=None, bcc=None, cc=None, files_manually=None): + start_thread(send_mail, subject, message, from_email, recipient_emails, files, html, + reply_to, bcc, cc, files_manually) + + +def send_mail_using_template(subject, template_name, from_email, recipient_emails, context_map, + in_thread=False, files=None, html=False, reply_to=None, bcc=None, + cc=None, files_manually=None): + t = get_template(template_name) + message = t.render(context_map) + if in_thread: + return send_mail_in_thread(subject, message, from_email, recipient_emails, files, html, + reply_to, bcc, cc, files_manually) + else: + return send_mail(subject, message, from_email, recipient_emails, files, html, reply_to, + bcc, cc, files_manually) + + +def utc_to_pacific(timestamp): + return timestamp.replace(tzinfo=utc).astimezone(Pacific) + + +def pacific_to_utc(timestamp): + return timestamp.replace(tzinfo=Pacific).astimezone(utc) + + +def humanize_time_since(timestamp=None): + """ + Returns a fuzzy time since. Will only return the largest time. EX: 20 days, 14 min + """ + timeDiff = datetime.datetime.now() - timestamp + days = timeDiff.days + hours = timeDiff.seconds / 3600 + minutes = timeDiff.seconds % 3600 / 60 + seconds = timeDiff.seconds % 3600 % 60 + + str = "" + if days > 0: + if days == 1: + t_str = "day" + else: + t_str = "days" + str += "{0} {1}".format(days, t_str) + return str + elif hours > 0: + if hours == 1: + t_str = "hour" + else: + t_str = "hours" + str += "{0} {1}".format(hours, t_str) + return str + elif minutes > 0: + if minutes == 1: + t_str = "min" + else: + t_str = "mins" + str += "{0} {1}".format(minutes, t_str) + return str + elif seconds > 0: + if seconds == 1: + t_str = "sec" + else: + t_str = "secs" + str += "{0} {1}".format(seconds, t_str) + return str + else: + return str + + +def chunks(l, n): + """ + split successive n-sized chunks from a list. 
+ """ + for i in range(0, len(l), n): + yield l[i:i + n] diff --git a/thesisenv/lib/python3.6/site-packages/django_common/http.py b/thesisenv/lib/python3.6/site-packages/django_common/http.py new file mode 100644 index 0000000..2c6daf4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/http.py @@ -0,0 +1,58 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.http import HttpResponse + +try: + import json +except ImportError: + from django.utils import simplejson as json + + +class JsonResponse(HttpResponse): + def __init__(self, data=None, errors=None, success=True): + """ + data is a map, errors a list + """ + if not errors: + errors = [] + if not data: + data = {} + json_resp = json_response(data=data, errors=errors, success=success) + super(JsonResponse, self).__init__(json_resp, content_type='application/json') + + +class JsonpResponse(HttpResponse): + """ + Padded JSON response, used for widget XSS + """ + def __init__(self, request, data=None, errors=None, success=True): + """ + data is a map, errors a list + """ + if not errors: + errors = [] + if not data: + data = {} + json_resp = json_response(data=data, errors=errors, success=success) + js = "{0}({1})".format(request.GET.get("jsonp", "jsonp_callback"), json_resp) + super(JsonpResponse, self).__init__(js, mimetype='application/javascipt') + + +def json_response(data=None, errors=None, success=True): + if not errors: + errors = [] + if not data: + data = {} + data.update({ + 'errors': errors, + 'success': len(errors) == 0 and success, + }) + return json.dumps(data) + + +class XMLResponse(HttpResponse): + def __init__(self, data): + """ + data is the entire xml body/document + """ + super(XMLResponse, self).__init__(data, mimetype='text/xml') diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/django_common/management/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_common/management/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/django_common/management/commands/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_common/management/commands/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_common/management/commands/generate_secret_key.py b/thesisenv/lib/python3.6/site-packages/django_common/management/commands/generate_secret_key.py new file mode 100644 index 0000000..de7dcc4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/management/commands/generate_secret_key.py @@ -0,0 +1,34 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.core.management.base import BaseCommand +from django.utils.crypto import get_random_string +from django.utils.translation import ugettext as _ + +import string + + +class Command(BaseCommand): + help = _('This command generates SECRET_KEY') + + # Default length is 50 + length = 50 + + # Allowed characters + allowed_chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation + + def add_arguments(self, parser): + """ + Define optional arguments with default values + """ + parser.add_argument('--length', default=self.length, + type=int, 
help=_('SECRET_KEY length default=%d' % self.length)) + + parser.add_argument('--alphabet', default=self.allowed_chars, + type=str, help=_('alphabet to use default=%s' % self.allowed_chars)) + + def handle(self, *args, **options): + length = options.get('length') + alphabet = options.get('alphabet') + secret_key = str(get_random_string(length=length, allowed_chars=alphabet)) + + print('SECRET_KEY: %s' % secret_key) diff --git a/thesisenv/lib/python3.6/site-packages/django_common/management/commands/scaffold.py b/thesisenv/lib/python3.6/site-packages/django_common/management/commands/scaffold.py new file mode 100644 index 0000000..1b7f709 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/management/commands/scaffold.py @@ -0,0 +1,51 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.core.management.base import BaseCommand + +from django_common.scaffold import Scaffold +from django_common import settings + + +class Command(BaseCommand): + def add_arguments(self, parser): + parser.add_argument('app_name', nargs='*') + parser.add_argument( + '--model', default=None, dest='model', nargs='+', help=""" + model name - only one model name per run is allowed. \n + It requires additional fields parameters: + + char - CharField \t\t\t\t + text - TextField \t\t\t\t + int - IntegerFIeld \t\t\t\t + decimal -DecimalField \t\t\t\t + datetime - DateTimeField \t\t\t\t + foreign - ForeignKey \t\t\t\t + + Example usages: \t\t\t\t + + --model forum char:title text:body int:posts datetime:create_date \t\t + --model blog foreign:blog:Blog, foreign:post:Post, foreign:added_by:User \t\t + --model finance decimal:total_cost:10:2 + + """ + ) + + def handle(self, *args, **options): + if len(options['app_name']) == 0: + print("You must provide app name. 
For example:\n\npython manage.py scaffold my_app\n")
+            return
+
+        app_name = options['app_name'][0]
+        model_data = options['model']
+        if model_data:
+            model_name = model_data[0]
+            fields = model_data[1:]
+        else:
+            model_name = None
+            fields = None
+
+        scaffold = Scaffold(app_name, model_name, fields)
+        scaffold.run()
+
+    def get_version(self):
+        return 'django-common version: {0}'.format(settings.VERSION)
diff --git a/thesisenv/lib/python3.6/site-packages/django_common/middleware.py b/thesisenv/lib/python3.6/site-packages/django_common/middleware.py
new file mode 100644
index 0000000..46aa1ae
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_common/middleware.py
@@ -0,0 +1,65 @@
+from __future__ import print_function, unicode_literals, with_statement, division
+
+from django.conf import settings
+from django.http import HttpResponsePermanentRedirect, HttpResponseRedirect
+from django_common.session import SessionManager
+
+WWW = 'www'
+
+
+class WWWRedirectMiddleware(object):
+    """
+    Redirect requests, for example from http://www.mysite.com/* to http://mysite.com/*
+    """
+    def process_request(self, request):
+        if settings.IS_PROD and request.get_host() != settings.DOMAIN_NAME:
+            proto_suffix = 's' if request.is_secure() else ''
+            url = 'http{0}://{1}{2}'.format(proto_suffix, settings.DOMAIN_NAME,
+                                            request.get_full_path())
+            return HttpResponsePermanentRedirect(url)
+        return None
+
+
+class UserTimeTrackingMiddleware(object):
+    """
+    Tracks how long a user has been on the site
+    """
+    def process_request(self, request):
+        if request.user and request.user.is_authenticated():
+            SessionManager(request).ping_usertime()
+        else:
+            SessionManager(request).clear_usertime()
+
+
+class SSLRedirectMiddleware(object):
+    """
+    Redirects all non-SSL requests to an SSL URL
+    """
+    def process_request(self, request):
+        if not request.is_secure():
+            url = 'https://{0}{1}'.format(settings.DOMAIN_NAME, request.get_full_path())
+            return HttpResponseRedirect(url)
+        return None
+
+
+class NoSSLRedirectMiddleware(object):
+    """
+    Redirects if a non-SSL required view is hit.
This middleware assumes an SSL-protected view + has been decorated with the 'ssl_required' decorator (see decorators.py). + + Redirects the admin to https, though only in PROD. + """ + __DECORATOR_INNER_FUNC_NAME = '_checkssl' + + def __is_in_admin(self, request): + return True if request.path.startswith('/admin/') else False + + def process_view(self, request, view_func, view_args, view_kwargs): + if view_func.func_name != self.__DECORATOR_INNER_FUNC_NAME \ + and not (self.__is_in_admin(request) and settings.IS_PROD) \ + and request.is_secure(): # request is secure, but view is not decorated + url = 'http://{0}{1}'.format(settings.DOMAIN_NAME, request.get_full_path()) + return HttpResponseRedirect(url) + elif self.__is_in_admin(request) and not request.is_secure() and settings.IS_PROD: + url = 'https://{0}{1}'.format(settings.DOMAIN_NAME, request.get_full_path()) + return HttpResponseRedirect(url) diff --git a/thesisenv/lib/python3.6/site-packages/django_common/mixin.py b/thesisenv/lib/python3.6/site-packages/django_common/mixin.py new file mode 100644 index 0000000..d16e3f4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/mixin.py @@ -0,0 +1,10 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.contrib.auth.decorators import login_required +from django.utils.decorators import method_decorator + + +class LoginRequiredMixin(object): + @method_decorator(login_required) + def dispatch(self, request, *args, **kwargs): + return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/django_common/scaffold.py b/thesisenv/lib/python3.6/site-packages/django_common/scaffold.py new file mode 100644 index 0000000..66c0ec8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/scaffold.py @@ -0,0 +1,757 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from os import path, system, listdir, sys, mkdir +from django.conf import settings +# VIEW CONSTS + +LIST_VIEW = """ +from %(app)s.forms import %(model)sForm +def %(lower_model)s_list(request, template='%(lower_model)s/list.html'): + d = {} + d['form'] = %(model)sForm() + if request.method == 'POST': + form = %(model)sForm(request.POST) + if form.is_valid(): + item = form.save() + return JsonResponse(data={'id': item.id, 'name': str(item), 'form': %(model)sForm().as_p(), 'token': get_token(request)}) + else: + d['form'] = form + return JsonResponse(data={'form': d['form'].as_p(), 'token': get_token(request)}, success=False) + d['%(lower_model)s_list'] = %(model)s.objects.all() + return render(request, template, d) +""" + +DETAILS_VIEW = """ +from %(app)s.forms import %(model)sForm +def %(lower_model)s_details(request, id, template='%(lower_model)s/details.html'): + d = {} + item = get_object_or_404(%(model)s, pk=id) + d['form'] = %(model)sForm(instance=item) + if request.method == 'POST': + form = %(model)sForm(request.POST, instance=item) + if form.is_valid(): + item = form.save() + return JsonResponse(data={'form': %(model)sForm(instance=item).as_p(), 'token': get_token(request)}) + else: + d['form'] = form + return JsonResponse(data={'form': d['form'].as_p(), 'token': get_token(request)}, success=False) + d['%(lower_model)s'] = %(model)s.objects.get(pk=id) + return render(request, template, d) +""" + +DELETE_VIEW = """ +def %(lower_model)s_delete(request, id): + item = %(model)s.objects.get(pk=id) + item.delete() + return JsonResponse() +""" +# MODELS CONSTS + 
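+# (Editor's note, not part of the original module: the VIEW and MODEL template constants in this file are plain %-style format strings that the Scaffold class below fills in; e.g., with the hypothetical values 'Post'/'post', DELETE_VIEW % {'model': 'Post', 'lower_model': 'post'} expands to a ready-to-use post_delete view.)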
+MODEL_TEMPLATE = """ +class %s(models.Model): + %s + update_date = models.DateTimeField(auto_now=True) + create_date = models.DateTimeField(auto_now_add=True) + + class Meta: + ordering = ['-id'] +""" + +IMPORT_MODEL_TEMPLATE = """from %(app)s.models import %(model)s""" + +CHARFIELD_TEMPLATE = """ + %(name)s = models.CharField(max_length=%(length)s, null=%(null)s, blank=%(null)s) +""" + +TEXTFIELD_TEMPLATE = """ + %(name)s = models.TextField(null=%(null)s, blank=%(null)s) +""" + +INTEGERFIELD_TEMPLATE = """ + %(name)s = models.IntegerField(null=%(null)s, default=%(default)s) +""" + +DECIMALFIELD_TEMPLATE = """ + %(name)s = models.DecimalField(max_digits=%(digits)s, decimal_places=%(places)s, null=%(null)s, default=%(default)s) +""" + +DATETIMEFIELD_TEMPLATE = """ + %(name)s = models.DateTimeField(null=%(null)s, default=%(default)s) +""" + +FOREIGNFIELD_TEMPLATE = """ + %(name)s = models.ForeignKey(%(foreign)s, null=%(null)s, blank=%(null)s) +""" + +TEMPLATE_LIST_CONTENT = """ +{%% extends "base.html" %%} + +{%% block page-title %%}%(title)s{%% endblock %%} + +{%% block content %%} +

%(model)s list


+ + + + + + + {%% for item in %(model)s_list %%} + + + + + + {%% endfor %%} +
IDNameAction
{{ item.id }}{{ item }}show
+
+

+ + + +{%% endblock %%} +""" + +TEMPLATE_DETAILS_CONTENT = """ +{%% extends "base.html" %%} + +{%% block page-title %%}%(title)s - {{ %(model)s }} {%% endblock %%} + +{%% block content %%} +
+

%(model)s - {{ %(model)s }}


+ + + + + + + + + + + +
IDNameAction
{{ %(model)s.id }}{{ %(model)s }}
+
+
+
+

+ +
+ + + back to list +{%% endblock %%} +""" + +URL_CONTENT = """ +from django.conf.urls import url +from django.contrib.auth import views as auth_views + +from %(app)s import views + +urlpatterns = [ + url(r'^%(model)s/$', views.%(model)s_list, name='%(model)s-list'), + url(r'^%(model)s/(?P<id>\d+)/$', views.%(model)s_details, name='%(model)s-details'), + url(r'^%(model)s/(?P<id>\d+)/delete/$', views.%(model)s_delete, name='%(model)s-delete'), +] +""" + +URL_EXISTS_CONTENT = """ + url(r'^%(model)s/$', views.%(model)s_list, name='%(model)s-list'), + url(r'^%(model)s/(?P<id>\d+)/$', views.%(model)s_details, name='%(model)s-details'), + url(r'^%(model)s/(?P<id>\d+)/delete/$', views.%(model)s_delete, name='%(model)s-delete'), +""" + +ADMIN_CONTENT = """ +from %(app)s.models import %(model)s +admin.site.register(%(model)s) +""" + +FORM_CONTENT = """ + +from %(app)s.models import %(model)s + +class %(model)sForm(forms.ModelForm): + class Meta: + model = %(model)s +""" + +TESTS_CONTENT = """ + +from %(app)s.models import %(model)s + + +class %(model)sTest(TestCase): + + def setUp(self): + self.user = User.objects.create(username='test_user') + + def tearDown(self): + self.user.delete() + + def test_list(self): + response = self.client.get(reverse('%(lower_model)s-list')) + self.failUnlessEqual(response.status_code, 200) + + def test_crud(self): + # Create new instance + response = self.client.post(reverse('%(lower_model)s-list'), {}) + self.assertContains(response, '"success": true') + + # Read instance + items = %(model)s.objects.all() + self.failUnlessEqual(items.count(), 1) + item = items[0] + response = self.client.get(reverse('%(lower_model)s-details', kwargs={'id': item.id})) + self.failUnlessEqual(response.status_code, 200) + + # Update instance + response = self.client.post(reverse('%(lower_model)s-details', kwargs={'id': item.id}), {}) + self.assertContains(response, '"success": true') + + # Delete instance + response = self.client.post(reverse('%(lower_model)s-delete', kwargs={'id': item.id}), {}) + self.assertContains(response, '"success": true') + items = %(model)s.objects.all() + self.failUnlessEqual(items.count(), 0) + +""" + + +class Scaffold(object): + def _info(self, msg, indent=0): + print("{0} {1}".format("\t" * int(indent), msg)) + + def __init__(self, app, model, fields): + self.app = app + self.model = model + self.fields = fields + + try: + self.SCAFFOLD_APPS_DIR = settings.SCAFFOLD_APPS_DIR + except: + self.SCAFFOLD_APPS_DIR = './' + + def get_import(self, model): + for dir in listdir(self.SCAFFOLD_APPS_DIR): + if path.isdir('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, dir)) \ + and path.exists('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, dir)): + with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, dir), 'r') as fp: + # Check if model exists + for line in fp.readlines(): + if 'class {0}(models.Model)'.format(model) in line: + # print "Foreign key '%s' was found in app %s..." % (model, dir) + return IMPORT_MODEL_TEMPLATE % {'app': dir, 'model': model} + return None + + def is_imported(self, path, model): + with open(path, 'r') as import_file: + for line in import_file.readlines(): + if 'import {0}'.format(model) in line: + # print "Foreign key '%s' was found in models.py..." 
% (foreign) + return True + return False + + def add_global_view_imports(self, path): + # from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404 + import_list = list() + + with open(path, 'r') as import_file: + need_import_shortcut = True + need_import_urlresolvers = True + need_import_users = True + need_import_token = True + need_import_JsonResponse = True + + for line in import_file.readlines(): + if 'from django.shortcuts import render, redirect, get_object_or_404' in line: + need_import_shortcut = False + + if 'from django.core.urlresolvers import reverse' in line: + need_import_urlresolvers = False + + if 'from django.contrib.auth.models import User, Group' in line: + need_import_users = False + + if 'from django.middleware.csrf import get_token' in line: + need_import_token = False + + if 'from django_common.http import JsonResponse' in line: + need_import_JsonResponse = False + + if need_import_shortcut: + import_list.append( + 'from django.shortcuts import render, redirect, get_object_or_404') + if need_import_urlresolvers: + import_list.append('from django.core.urlresolvers import reverse') + if need_import_users: + import_list.append('from django.contrib.auth.models import User, Group') + if need_import_token: + import_list.append('from django.middleware.csrf import get_token') + if need_import_JsonResponse: + import_list.append('from django_common.http import JsonResponse') + + return import_list + + def view_exists(self, path, view): + # Check if view already exists + with open(path, 'r') as view_file: + for line in view_file.readlines(): + if 'def {0}('.format(view) in line: + return True + return False + + def get_field(self, field): + field = field.split(':') + field_type = field[0] + + if field_type.lower() == 'char': + try: + length = field[2] + except IndexError: + length = 255 + + try: + null = field[3] + null = 'False' + except IndexError: + null = 'True' + + return CHARFIELD_TEMPLATE % {'name': field[1], 'length': length, 'null': null} + elif field_type.lower() == 'text': + try: + null = field[2] + null = 'False' + except IndexError: + null = 'True' + + return TEXTFIELD_TEMPLATE % {'name': field[1], 'null': null} + elif field_type.lower() == 'int': + try: + null = field[2] + null = 'False' + except IndexError: + null = 'True' + + try: + default = field[3] + except IndexError: + default = None + + return INTEGERFIELD_TEMPLATE % {'name': field[1], 'null': null, 'default': default} + elif field_type.lower() == 'decimal': + try: + null = field[4] + null = 'False' + except IndexError: + null = 'True' + + try: + default = field[5] + except IndexError: + default = None + + return DECIMALFIELD_TEMPLATE % { + 'name': field[1], + 'digits': field[2], + 'places': field[3], + 'null': null, + 'default': default, + } + elif field_type.lower() == 'datetime': + try: + null = field[2] + null = 'False' + except IndexError: + null = 'True' + + try: + default = field[3] + except IndexError: + default = None + + return DATETIMEFIELD_TEMPLATE % {'name': field[1], 'null': null, 'default': default} + elif field_type.lower() == 'foreign': + foreign = field[2] + name = field[1] + # Check if this foreign key is already in models.py + if foreign in ('User', 'Group'): + if not self.is_imported('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, + self.app), foreign): + self.imports.append('\nfrom django.contrib.auth.models import User, Group\n') + return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'} + if 
self.is_imported('{0}{1}/models.py'.format( + self.SCAFFOLD_APPS_DIR, self.app), foreign): + return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'} + # Check imports + if self.get_import(foreign): + self.imports.append(self.get_import(foreign)) + return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'} + + self._info('error\t{0}{1}/models.py\t{2} class not found'.format( + self.SCAFFOLD_APPS_DIR, self.app, foreign), 1) + return None + + def create_app(self): + self._info(" App ") + self._info("===========") + if self.SCAFFOLD_APPS_DIR and not path.exists('{0}'.format(self.SCAFFOLD_APPS_DIR)): + raise Exception( + "SCAFFOLD_APPS_DIR {0} does not exist".format(self.SCAFFOLD_APPS_DIR)) + if not path.exists('{0}{1}'.format(self.SCAFFOLD_APPS_DIR, self.app)): + system('python manage.py startapp {0}'.format(self.app)) + system('mv {0} {1}{2}'.format(self.app, self.SCAFFOLD_APPS_DIR, self.app)) + self._info("create\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + self._info("exists\t{0}{1}".format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + def create_views(self): + self._info(" Views ") + self._info("===========") + # Path to views.py + view_path = '{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app) + + # Check if views.py exists + if path.exists('{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app)): + self._info('exists\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + with open("{0}{1}/views.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w'): + self._info('create\t{0}{1}/views.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + import_list = list() + view_list = list() + + # Add global imports + import_list.append('\n'.join(imp for imp in self.add_global_view_imports(view_path))) + + # Add model imports + if not self.is_imported(view_path, self.model): + import_list.append(self.get_import(self.model)) + + lower_model = self.model.lower() + + # Check if view already exists + if not self.view_exists(view_path, "{0}_list".format(lower_model)): + view_list.append(LIST_VIEW % { + 'lower_model': lower_model, + 'model': self.model, + 'app': self.app, + }) + self._info("added \t{0}\t{1}_view".format(view_path, lower_model), 1) + else: + self._info("exists\t{0}\t{1}_view".format(view_path, lower_model), 1) + + if not self.view_exists(view_path, "{0}_details".format(lower_model)): + view_list.append(DETAILS_VIEW % { + 'lower_model': lower_model, + 'model': self.model, + 'app': self.app, + }) + self._info("added \t{0}\t{1}_details".format(view_path, lower_model), 1) + else: + self._info("exists\t{0}\t{1}_details".format(view_path, lower_model), 1) + + if not self.view_exists(view_path, "{0}_delete".format(lower_model)): + view_list.append(DELETE_VIEW % { + 'lower_model': lower_model, + 'model': self.model, + }) + self._info("added \t{0}\t{1}_delete".format(view_path, lower_model), 1) + else: + self._info("exists\t{0}\t{1}_delete".format(view_path, lower_model), 1) + + # Open views.py to append + with open(view_path, 'a') as view_file: + view_file.write('\n'.join([import_line for import_line in import_list])) + view_file.write(''.join([view for view in view_list])) + + def create_model(self): + self._info(" Model ") + self._info("===========") + + # Open models.py to read + with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + self.models_file = fp + + # Check if model already exists + for line in self.models_file.readlines(): + if 'class {0}'.format(self.model) in 
line: + self._info('exists\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + return + + self._info('create\t{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + # Prepare fields + self.imports = [] + fields = [] + + for field in self.fields: + new_field = self.get_field(field) + + if new_field: + fields.append(new_field) + self._info('added\t{0}{1}/models.py\t{2} field'.format( + self.SCAFFOLD_APPS_DIR, self.app, field.split(':')[1]), 1) + + # Open models.py to append + with open('{0}{1}/models.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp: + fp.write(''.join([import_line for import_line in self.imports])) + fp.write(MODEL_TEMPLATE % (self.model, ''.join(field for field in fields))) + + def create_templates(self): + self._info(" Templates ") + self._info("===========") + + # Check if template dir exists + + if path.exists('{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app)): + self._info('exists\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + mkdir("{0}{1}/templates/".format(self.SCAFFOLD_APPS_DIR, self.app)) + self._info('create\t{0}{1}/templates/'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + # Check if model template dir exists + + if path.exists('{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower())): + self._info('exists\t{0}{1}/templates/{2}/'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower()), 1) + else: + mkdir("{0}{1}/templates/{2}/".format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower())) + self._info('create\t{0}{1}/templates/{2}/'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + + # Check if list.html exists + + if path.exists('{0}{1}/templates/{2}/list.html'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower())): + self._info('exists\t{0}{1}/templates/{2}/list.html'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + else: + with open("{0}{1}/templates/{2}/list.html".format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower()), 'w') as fp: + fp.write(TEMPLATE_LIST_CONTENT % { + 'model': self.model.lower(), + 'title': self.model.lower(), + }) + self._info('create\t{0}{1}/templates/{2}/list.html'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + + # Check if details.html exists + + if path.exists('{0}{1}/templates/{2}/details.html'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower())): + self._info('exists\t{0}{1}/templates/{2}/details.html'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + else: + with open("{0}{1}/templates/{2}/details.html".format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 'w') as fp: + fp.write(TEMPLATE_DETAILS_CONTENT % { + 'model': self.model.lower(), + 'title': self.model.lower(), + }) + self._info('create\t{0}{1}/templates/{2}/details.html'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + + def create_urls(self): + self._info(" URLs ") + self._info("===========") + + # Check if urls.py exists + + if path.exists('{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app)): + + # If it does, we need to add the new url patterns + new_urls = '' + with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + for line in fp.readlines(): + new_urls += line + if 'urlpatterns' in line: + new_urls += URL_EXISTS_CONTENT % { + 'app': self.app, + 'model': self.model.lower(), + } + with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp: + fp.write(new_urls) + 
self._info('update\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + with open("{0}{1}/urls.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp: + fp.write(URL_CONTENT % { + 'app': self.app, + 'model': self.model.lower(), + }) + + self._info('create\t{0}{1}/urls.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + def create_admin(self): + self._info(" Admin ") + self._info("===========") + + # Check if admin.py exists + + if path.exists('{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app)): + self._info('exists\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp: + fp.write("from django.contrib import admin\n") + self._info('create\t{0}{1}/admin.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + # Check if admin entry already exists + + with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + content = fp.read() + if "admin.site.register({0})".format(self.model) in content: + self._info('exists\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower()), 1) + else: + with open("{0}{1}/admin.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp: + fp.write(ADMIN_CONTENT % {'app': self.app, 'model': self.model}) + self._info('added\t{0}{1}/admin.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower()), 1) + + def create_forms(self): + self._info(" Forms ") + self._info("===========") + + # Check if forms.py exists + if path.exists('{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app)): + self._info('exists\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + else: + with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'w') as fp: + fp.write("from django import forms\n") + self._info('create\t{0}{1}/forms.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + # Check if form entry already exists + + with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + content = fp.read() + if "class {0}Form".format(self.model) in content: + self._info('exists\t{0}{1}/forms.py\t{2}'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + else: + with open("{0}{1}/forms.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp: + fp.write(FORM_CONTENT % {'app': self.app, 'model': self.model}) + self._info('added\t{0}{1}/forms.py\t{2}'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + + def create_tests(self): + self._info(" Tests ") + self._info("===========") + + # Check if tests.py exists + if path.exists('{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app)): + self._info('exists\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + # Check if imports exist: + import_testcase = True + import_user = True + import_reverse = True + + with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + for line in fp.readlines(): + if 'import TestCase' in line: + import_testcase = False + if 'import User' in line: + import_user = False + if 'import reverse' in line: + import_reverse = False + + with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp: + if import_testcase: + fp.write("from django.test import TestCase\n") + if import_user: + fp.write("from django.contrib.auth.models import User\n") + if import_reverse: + fp.write("from django.core.urlresolvers import reverse\n") + else: + with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 
'w') as fp: + fp.write("from django.test import TestCase\n") + fp.write("from django.contrib.auth.models import User\n") + fp.write("from django.core.urlresolvers import reverse\n") + self._info('create\t{0}{1}/tests.py'.format(self.SCAFFOLD_APPS_DIR, self.app), 1) + + # Check if test class already exists + with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'r') as fp: + content = fp.read() + if "class {0}Test".format(self.model) in content: + self._info('exists\t{0}{1}/tests.py\t{2}'.format( + self.SCAFFOLD_APPS_DIR, self.app, self.model.lower()), 1) + else: + with open("{0}{1}/tests.py".format(self.SCAFFOLD_APPS_DIR, self.app), 'a') as fp: + fp.write(TESTS_CONTENT % { + 'app': self.app, + 'model': self.model, + 'lower_model': self.model.lower(), + }) + + self._info('added\t{0}{1}/tests.py\t{2}'.format(self.SCAFFOLD_APPS_DIR, self.app, + self.model.lower()), 1) + + def run(self): + if not self.app: + sys.exit("No application name found...") + if not self.app.isalnum(): + sys.exit("App name should be alphanumerical...") + self.create_app() + if self.model: + self.create_model() + self.create_views() + self.create_admin() + self.create_forms() + self.create_urls() + self.create_templates() + self.create_tests() diff --git a/thesisenv/lib/python3.6/site-packages/django_common/session.py b/thesisenv/lib/python3.6/site-packages/django_common/session.py new file mode 100644 index 0000000..22f2e32 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/session.py @@ -0,0 +1,83 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from datetime import datetime, timedelta +from django.conf import settings + + +class SessionManagerBase(object): + """ + Base class that a "SessionManager" concrete class should extend. + It should have a list called _SESSION_KEYS that lists all the keys that the class uses/depends on. + + Ideally each app has a session.py that has this class and is used in the app's views etc. 
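+ + An illustrative subclass (editor's sketch, not part of the original file; 'WizardSession' and 'step' are hypothetical names):: + + class WizardSession(SessionManagerBase): + _SESSION_KEYS = ['step'] + + def step(self, value=None): + # sets the key when a value is given, otherwise reads it + return self._get_or_set('step', value)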
+ """ + def __init__(self, request, prepend_key_with=''): + self._session = request.session + self._prepend_key_with = prepend_key_with + + def _get_or_set(self, key, value): + key = '{0}{1}'.format(self._prepend_key_with, key) + + if value is not None: + self._session[key] = value + return value + return self._session.get(key) + + def reset_keys(self): + for key in self._SESSION_KEYS: + key = '{0}{1}'.format(self._prepend_key_with, key) + + if key in self._session: + del self._session[key] + + +class SessionManager(SessionManagerBase): + """Manages storing the cart""" + + USER_ONLINE_TIMEOUT = 180 # 3 min + + USERTIME = 'usertime' + _GENERIC_VAR_KEY_PREFIX = 'lpvar_' # handles generic stuff being stored in the session + + _SESSION_KEYS = [ + USERTIME, + ] + + def __init__(self, request): + super(SessionManager, self).__init__(request, prepend_key_with=request.get_host()) + if not self._get_or_set(self.USERTIME, None): + self._get_or_set(self.USERTIME, None) + + def get_usertime(self): + usertime = self._get_or_set(self.USERTIME, None) + try: + return usertime['last'] - usertime['start'] + except: + return 0 + + def ping_usertime(self): + # Override default user online timeout + try: + timeout = int(settings.USER_ONLINE_TIMEOUT) + except: + timeout = self.USER_ONLINE_TIMEOUT + if not self._get_or_set(self.USERTIME, None): + self._get_or_set(self.USERTIME, {'start': datetime.now(), 'last': datetime.now()}) + else: + usertime = self._get_or_set(self.USERTIME, None) + if usertime['last'] + timedelta(seconds=timeout) < datetime.now(): + # This mean user reached timeout - we start from begining + self._get_or_set(self.USERTIME, {'start': datetime.now(), 'last': datetime.now()}) + else: + # We just update last time + usertime['last'] = datetime.now() + return self._get_or_set(self.USERTIME, None) + + def clear_usertime(self): + return self._get_or_set(self.USERTIME, {}) + + def generic_var(self, key, value=None): + """ + Stores generic variables in the session prepending it with _GENERIC_VAR_KEY_PREFIX. + """ + return self._get_or_set('{0}{1}'.format(self._GENERIC_VAR_KEY_PREFIX, key), value) diff --git a/thesisenv/lib/python3.6/site-packages/django_common/settings.py b/thesisenv/lib/python3.6/site-packages/django_common/settings.py new file mode 100644 index 0000000..86f4079 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/settings.py @@ -0,0 +1,3 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +VERSION = '0.9.2' diff --git a/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/ajax_form.js b/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/ajax_form.js new file mode 100644 index 0000000..908fb87 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/ajax_form.js @@ -0,0 +1,71 @@ +/* +* Helper class for forms, mostly helps with ajax form submits etc. +* +* + Assumes there is an image with class 'ajax-indicator' on the page somewhere. 
+*/ +function FormHelper(form_id) { + if (form_id) { + this.__form = $('#' + form_id); + } else { + this.__form = $('form'); + } +} + +FormHelper.prototype.bind_for_ajax = function(success_handler, failure_handler) { + var self=this; + this.__form.submit(function() { + self.ajax_submit(success_handler, failure_handler); + return false; + }); +} + +FormHelper.prototype.ajax_submit = function(success_handler, failure_handler) { + this.__clear_errors(); + this.__form.find('img.ajax-indicator').show(); + + var self=this; + $.post(this.__form.attr('action'), this.__form.serialize(), + function(data) { + if (data.success) { + success_handler(data); + } else if (failure_handler != undefined) { + failure_handler(data); + } else { + self.__fill_errors(data); + } + self.__form.find('img.ajax-indicator').hide(); + }, + "json"); + + this.__toggle_inputs_disable_state(true); +}; + +FormHelper.prototype.__fill_errors = function(data) { + if (data.form != undefined) { + for (var field in data.form.errors) { + if (field != 'non_field_errors') { + this.__form.find('#id_error_container_' + field).html(data.form.errors[field]); + this.__form.find('#id_' + field + '_container').addClass('errorRow').addClass('errRow'); + } else { + this.__form.prepend('
' + + data.form.errors['non_field_errors'] + '
'); + } + } + } + if (data.errors.length > 0) { + this.__form.prepend('
' + + data.errors + '
'); + } + + this.__toggle_inputs_disable_state(false); +}; + +FormHelper.prototype.__toggle_inputs_disable_state = function(disable) { + this.__form.find('input, select').attr('disabled', disable); +} + +FormHelper.prototype.__clear_errors = function() { + this.__form.find('div.error_container').empty(); + this.__form.find('div.formRow').removeClass('errorRow').removeClass('errRow'); + $('#id_non_field_errors').remove(); +}; diff --git a/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/common.js b/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/common.js new file mode 100644 index 0000000..e5d0734 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/static/django_common/js/common.js @@ -0,0 +1,12 @@ +/* +* Common js functions +* +*/ + +function confirmModal(msg) { + if (confirm(msg)) { + return true; + } else { + return false; + } +}; \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested.html new file mode 100644 index 0000000..3e8430c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested.html @@ -0,0 +1,79 @@ + {{ nested.formset.management_form }} + +
+

{{ nested.opts.verbose_name_plural|capfirst }}

+
+ + {% for field in nested.fields %} + + {% endfor %} + {% if nested.formset.can_delete %} + + {% endif %} + + + + + {% for formset in nested %} + {% if formset.form.non_field_errors %} + + {% endif %} + {% if forloop.last %} + + + {% for fieldset in formset %} + {% for line in fieldset %} + {% for field in line %} + + {% endfor %} + {% endfor %} + {% endfor %} + {% if formset.original and nested.formset.can_delete %} + + {% endif %} + + {% else %} + + + {% for fieldset in formset %} + {% for line in fieldset %} + {% for field in line %} + + {% endfor %} + {% endfor %} + {% endfor %} + {% if formset.original and nested.formset.can_delete %} + + {% else %} + + {% endif %} + + {% endif %} + + + {% endfor %} + + +
{{ field.label|capfirst }}Delete?
+ {{ form.form.non_field_errors }} +
+ {% if formset.original %}

+ {{ formset.original }} +

{% endif %} + {% if formset.has_auto_field %} + {{ formset.pk_field.field }} + {% endif %}{{ formset.fk_field.field }} +
+ {{ field.field.errors.as_ul}} + {{ field.field }} + {{ formset.deletion_field.field }}
+ {% if formset.original %}

+ {{ formset.original }} +

{% endif %} + {% if formset.has_auto_field %} + {{ formset.pk_field.field }} + {% endif %}{{ formset.fk_field.field }} +
+ {{ field.field.errors.as_ul}} + {{ field.field }} + {{ formset.deletion_field.field }}
\ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested_tabular.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested_tabular.html new file mode 100644 index 0000000..5e78db9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/admin/nested_tabular.html @@ -0,0 +1,506 @@ +{% load i18n admin_modify admin_static %} + +{% block extrahead %} + +{% endblock %} + +
+ +
+ + diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/checkbox_field.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/checkbox_field.html new file mode 100644 index 0000000..d3019cd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/checkbox_field.html @@ -0,0 +1,12 @@ +
+ +
diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/form_field.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/form_field.html new file mode 100644 index 0000000..3ec57ee --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/form_field.html @@ -0,0 +1,14 @@ +
+ +
+ {{ form_field }} + {% if form_field.help_text %} + {{ form_field.help_text|safe }} + {% endif %} + {% if form_field.errors %} + {{ form_field.errors|safe }} + {% endif %} +
+
diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/multi_checkbox_field.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/multi_checkbox_field.html new file mode 100644 index 0000000..72794d6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/multi_checkbox_field.html @@ -0,0 +1,14 @@ +
+
+ + {{ form_field }} +
+
diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/radio_field.html b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/radio_field.html new file mode 100644 index 0000000..4958c26 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templates/common/fragments/radio_field.html @@ -0,0 +1,15 @@ +
+
+ + {{ form_field }} + +
+
diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py b/thesisenv/lib/python3.6/site-packages/django_common/templatetags/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_common/templatetags/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_common/templatetags/custom_tags.py b/thesisenv/lib/python3.6/site-packages/django_common/templatetags/custom_tags.py new file mode 100644 index 0000000..8e06ec9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/templatetags/custom_tags.py @@ -0,0 +1,95 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django import template +from django.forms import widgets +from django.template.loader import get_template + +register = template.Library() + + +class FormFieldNode(template.Node): + """ + Helper class for the render_form_field below + """ + def __init__(self, form_field, help_text=None, css_classes=None): + self.form_field = template.Variable(form_field) + self.help_text = help_text[1:-1] if help_text else help_text + self.css_classes = css_classes[1:-1] if css_classes else css_classes + + def render(self, context): + + try: + form_field = self.form_field.resolve(context) + except template.VariableDoesNotExist: + return '' + + widget = form_field.field.widget + + if isinstance(widget, widgets.HiddenInput): + return form_field + elif isinstance(widget, widgets.RadioSelect): + t = get_template('common/fragments/radio_field.html') + elif isinstance(widget, widgets.CheckboxInput): + t = get_template('common/fragments/checkbox_field.html') + elif isinstance(widget, widgets.CheckboxSelectMultiple): + t = get_template('common/fragments/multi_checkbox_field.html') + else: + t = get_template('common/fragments/form_field.html') + + help_text = self.help_text + if help_text is None: + help_text = form_field.help_text + + return t.render({ + 'form_field': form_field, + 'help_text': help_text, + 'css_classes': self.css_classes + }) + + +@register.tag +def render_form_field(parser, token): + """ + Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %} + + - optional_help_text and optional_css_classes are strings + - if optional_help_text is not given, then it is taken from form field object + """ + try: + help_text = None + css_classes = None + + token_split = token.split_contents() + if len(token_split) == 4: + tag_name, form_field, help_text, css_classes = token.split_contents() + elif len(token_split) == 3: + tag_name, form_field, help_text = token.split_contents() + else: + tag_name, form_field = token.split_contents() + except ValueError: + raise template.TemplateSyntaxError( + "Unable to parse arguments for {0}".format(repr(token.contents.split()[0]))) + + return FormFieldNode(form_field, help_text=help_text, css_classes=css_classes) + + +@register.simple_tag +def active(request, pattern): + """ + Returns the string 'active' if pattern matches. + Used to assign a css class in navigation bars to active tab/section. + """ + if request.path == pattern: + return 'active' + return '' + + +@register.simple_tag +def active_starts(request, pattern): + """ + Returns the string 'active' if request url starts with pattern. + Used to assign a css class in navigation bars to active tab/section. 
+ """ + if request.path.startswith(pattern): + return 'active' + return '' diff --git a/thesisenv/lib/python3.6/site-packages/django_common/tests.py b/thesisenv/lib/python3.6/site-packages/django_common/tests.py new file mode 100644 index 0000000..71a0c50 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/tests.py @@ -0,0 +1,38 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +from django.utils import unittest +from django.core.management import call_command + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +import sys +import random + + +class SimpleTestCase(unittest.TestCase): + def setUp(self): + pass + + def test_generate_secret_key(self): + """ Test generation of a secret key """ + out = StringIO() + sys.stdout = out + + for i in range(10): + random_number = random.randrange(10, 100) + call_command('generate_secret_key', length=random_number) + secret_key = self._get_secret_key(out.getvalue()) + + out.truncate(0) + out.seek(0) + + assert len(secret_key) == random_number + + def _get_secret_key(self, result): + """ Get only the value of a SECRET_KEY """ + for index, key in enumerate(result): + if key == ':': + return str(result[index + 1:]).strip() diff --git a/thesisenv/lib/python3.6/site-packages/django_common/tzinfo.py b/thesisenv/lib/python3.6/site-packages/django_common/tzinfo.py new file mode 100644 index 0000000..e224ad7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common/tzinfo.py @@ -0,0 +1,177 @@ +from __future__ import print_function, unicode_literals, with_statement, division + +# From the python documentation +# http://docs.python.org/library/datetime.html +from datetime import tzinfo, timedelta, datetime + +ZERO = timedelta(0) +HOUR = timedelta(hours=1) + +# A UTC class. + + +class UTC(tzinfo): + """ + UTC + """ + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO + +utc = UTC() + +# A class building tzinfo objects for fixed-offset time zones. +# Note that FixedOffset(0, "UTC") is a different way to build a +# UTC tzinfo object. + + +class FixedOffset(tzinfo): + """ + Fixed offset in minutes east from UTC. + """ + def __init__(self, offset, name): + self.__offset = timedelta(minutes=offset) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return ZERO + +# A class capturing the platform's idea of local time. + +import time as _time + +STDOFFSET = timedelta(seconds=-_time.timezone) +if _time.daylight: + DSTOFFSET = timedelta(seconds=-_time.altzone) +else: + DSTOFFSET = STDOFFSET + +DSTDIFF = DSTOFFSET - STDOFFSET + + +class LocalTimezone(tzinfo): + def utcoffset(self, dt): + if self._isdst(dt): + return DSTOFFSET + else: + return STDOFFSET + + def dst(self, dt): + if self._isdst(dt): + return DSTDIFF + else: + return ZERO + + def tzname(self, dt): + return _time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + tt = (dt.year, dt.month, dt.day, + dt.hour, dt.minute, dt.second, + dt.weekday(), 0, -1) + stamp = _time.mktime(tt) + tt = _time.localtime(stamp) + return tt.tm_isdst > 0 + +Local = LocalTimezone() + + +# A complete implementation of current DST rules for major US time zones. 
+ +def first_sunday_on_or_after(dt): + days_to_go = 6 - dt.weekday() + if days_to_go: + dt += timedelta(days_to_go) + return dt + + +# US DST Rules +# +# This is a simplified (i.e., wrong for a few cases) set of rules for US +# DST start and end times. For a complete and up-to-date set of DST rules +# and timezone definitions, visit the Olson Database (or try pytz): +# http://www.twinsun.com/tz/tz-link.htm +# http://sourceforge.net/projects/pytz/ (might not be up-to-date) +# +# In the US, since 2007, DST starts at 2am (standard time) on the second +# Sunday in March, which is the first Sunday on or after Mar 8. +DSTSTART_2007 = datetime(1, 3, 8, 2) +# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov. +DSTEND_2007 = datetime(1, 11, 1, 1) +# From 1987 to 2006, DST used to start at 2am (standard time) on the first +# Sunday in April and to end at 2am (DST time; 1am standard time) on the last +# Sunday of October, which is the first Sunday on or after Oct 25. +DSTSTART_1987_2006 = datetime(1, 4, 1, 2) +DSTEND_1987_2006 = datetime(1, 10, 25, 1) +# From 1967 to 1986, DST used to start at 2am (standard time) on the last +# Sunday in April (the one on or after April 24) and to end at 2am (DST time; +# 1am standard time) on the last Sunday of October, which is the first Sunday +# on or after Oct 25. +DSTSTART_1967_1986 = datetime(1, 4, 24, 2) +DSTEND_1967_1986 = DSTEND_1987_2006 + + +class USTimeZone(tzinfo): + def __init__(self, hours, reprname, stdname, dstname): + self.stdoffset = timedelta(hours=hours) + self.reprname = reprname + self.stdname = stdname + self.dstname = dstname + + def __repr__(self): + return self.reprname + + def tzname(self, dt): + if self.dst(dt): + return self.dstname + else: + return self.stdname + + def utcoffset(self, dt): + return self.stdoffset + self.dst(dt) + + def dst(self, dt): + if dt is None or dt.tzinfo is None: + # An exception may be sensible here, in one or both cases. + # It depends on how you want to treat them. The default + # fromutc() implementation (called by the default astimezone() + # implementation) passes a datetime with dt.tzinfo is self. + return ZERO + assert dt.tzinfo is self + + # Find start and end times for US DST. For years before 1967, return + # ZERO for no DST. + if 2006 < dt.year: + dststart, dstend = DSTSTART_2007, DSTEND_2007 + elif 1986 < dt.year < 2007: + dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006 + elif 1966 < dt.year < 1987: + dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986 + else: + return ZERO + + start = first_sunday_on_or_after(dststart.replace(year=dt.year)) + end = first_sunday_on_or_after(dstend.replace(year=dt.year)) + + # Can't compare naive to aware objects, so strip the timezone from + # dt first. + if start <= dt.replace(tzinfo=None) < end: + return HOUR + else: + return ZERO + +Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") +Central = USTimeZone(-6, "Central", "CST", "CDT") +Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") +Pacific = USTimeZone(-8, "Pacific", "PST", "PDT") diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/PKG-INFO new file mode 100644 index 0000000..a5af952 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/PKG-INFO @@ -0,0 +1,327 @@ +Metadata-Version: 1.1 +Name: django-common-helpers +Version: 0.9.2 +Summary: Common things every Django app needs! 
+Home-page: http://github.com/tivix/django-common +Author: Tivix +Author-email: dev@tivix.com +License: UNKNOWN +Description: ===================== + django-common-helpers + ===================== + + + Overview + --------- + + Django-common consists of the following things: + + - A middleware that makes sure your web-app runs either with or without 'www' in the domain. + + - A ``SessionManagerBase`` base class that helps in keeping your session-related code object-oriented and clean! See session.py for usage details. + + - An ``EmailBackend`` for authenticating users based on their email, apart from username. + + - Some custom db fields that you can use in your models, including a ``UniqueHashField`` and ``RandomHashField``. + + - A bunch of helpful functions in helper.py + + - A ``render_form_field`` template tag that makes rendering form fields easy and DRY. + + - A couple of DRY response classes: ``JsonResponse`` and ``XMLResponse`` in django_common.http that can be used in views that give json/xml responses. + + + Installation + ------------- + + - Install django_common (ideally in your virtualenv!) using pip or simply getting a copy of the code and putting it in a directory in your codebase. + + - Add ``django_common`` to your Django settings ``INSTALLED_APPS``:: + + INSTALLED_APPS = [ + # ... + "django_common", + ] + + - Add the following to your settings.py with appropriate values: + + - IS_DEV + - IS_PROD + - DOMAIN_NAME + - WWW_ROOT + + - Add ``common_settings`` to your Django settings ``TEMPLATE_CONTEXT_PROCESSORS``:: + + TEMPLATE_CONTEXT_PROCESSORS = [ + # ... + 'django_common.context_processors.common_settings', + ] + + - Add ``EmailBackend`` to the Django settings ``AUTHENTICATION_BACKENDS``:: + + AUTHENTICATION_BACKENDS = ( + 'django_common.auth_backends.EmailBackend', + 'django.contrib.auth.backends.ModelBackend' + ) + + - Add ``WWWRedirectMiddleware`` if required to the list of middlewares:: + + MIDDLEWARE_CLASSES = [ + # ... + "WWWRedirectMiddleware", + ] + + - Scaffolds / ajax_form.js (ajax forms) etc. require jQuery + + + Scaffolding feature + ------------------- + + 1. Installing + + To get scaffold, just download the ``scaffold`` branch of django-common, add it to ``INSTALLED_APPS`` and set up ``SCAFFOLD_APPS_DIR`` in settings. + + The default is the main app directory. However, if you use django_base_project you must set this to ``SCAFFOLD_APPS_DIR = 'apps/'``. + + 2. Run + + To run scaffold, type:: + + python manage.py scaffold APPNAME --model MODELNAME [fields] + + APPNAME is the app name. If the app does not exist it will be created. + MODELNAME is the model name. Just enter the model name that you want to create (for example: Blog, Topic, Post etc). It must be alphanumerical. Only one model per run is allowed! + + [fields] - list of the model fields. + + 3. Field types + + Available fields:: + + char - CharField + text - TextField + int - IntegerField + decimal - DecimalField + datetime - DateTimeField + foreign - ForeignKey + + All fields require a name, provided after the ``:`` sign, for example:: + + char:title text:body int:posts datetime:create_date + + Two fields, ``foreign`` and ``decimal``, require additional parameters: + + - "foreign" takes the foreign-key model as its third argument, example:: + + foreign:blog:Blog, foreign:post:Post, foreign:added_by:User + + NOTICE: All foreign key models must already exist in the project. The User and Group models are imported automatically. 
+ + - decimal field requires two more arguments, ``max_digits`` and ``decimal_places``, example:: + + decimal:total_cost:10:2 + + NOTICE: Scaffold automatically adds two fields to all models: update_date and create_date. + + 4. How it works? + + Scaffold creates models, views (CRUD), forms, templates, admin, urls and basic tests (CRUD). Scaffold templates use two blocks and extend base.html:: + + {% extends "base.html" %} + {% block page-title %} {% endblock %} + {% block content %} {% endblock %} + + So be sure you have your base.html set up properly. + + Scaffolding example usage + ------------------------- + + Let's create a very simple ``forum`` app. We need ``Forum``, ``Topic`` and ``Post`` models. + + - Forum model + + The Forum model needs just one field, ``name``:: + + python manage.py scaffold forum --model Forum char:name + + - Topic model + + Topics are created by site users, so we need: ``created_by``, ``title`` and a ``Forum`` foreign key (``update_date`` and ``create_date`` are always added to models):: + + python manage.py scaffold forum --model Topic foreign:created_by:User char:title foreign:forum:Forum + + - Post model + + Last come the Posts. Posts are related to Topics. Here we need: ``title``, ``body``, ``created_by`` and a foreign key to ``Topic``:: + + python manage.py scaffold forum --model Post char:title text:body foreign:created_by:User foreign:topic:Topic + + All data should be in place! + + Now you must add the ``forum`` app to ``INSTALLED_APPS`` and include the app in the ``urls.py`` file by adding to urlpatterns:: + + urlpatterns = [ + ... + url(r'^', include('forum.urls')), + ] + + Now run syncdb for the new app and you are ready to go:: + + python manage.py syncdb + + Run your server:: + + python manage.py runserver + + And go to the forum main page:: + + http://localhost:8000/forum/ + + The whole structure is in place. Now you can personalize models, templates and urls. + + At the end you can test the new app by running the tests:: + + python manage.py test forum + + Creating test database for alias 'default'... + ....... + ---------------------------------------------------------------------- + Ran 7 tests in 0.884s + + OK + + Happy scaffolding! + + Generation of SECRET_KEY + ------------------------ + + Sometimes you need to generate a new ``SECRET_KEY``, so now you can generate it using this command: + + $ python manage.py generate_secret_key + + Sample output: + + $ python manage.py generate_secret_key + + SECRET_KEY: 7,=_3t?n@'wV=p`ITIA6"CUgJReZf?s:`f~Jtl#2i=i^z%rCp- + + Optional arguments + + 1. ``--length`` - is the length of the key ``default=50`` + 2. ``--alphabet`` - is the alphabet to use to generate the key ``default=ascii letters + punctuation symbols`` + + Django settings keys + -------------------- + + - DOMAIN_NAME - Domain name, ``"www.example.com"`` + - WWW_ROOT - Root website url, ``"https://www.example.com/"`` + - IS_DEV - Current environment is development environment + - IS_PROD - Current environment is production environment + + + This open-source app is brought to you by Tivix, Inc. ( http://tivix.com/ ) + + + Changelog + ========= + + 0.9.2 + ----- + - Change for Django 2.X + + 0.9.1 + ----- + - Change for Django 1.10 - render() must be called with a dict, not a Context + + 0.9.0 + ----- + - Django 1.10 support + - README.txt invalid characters fix + - Add support for custom user model in EmailBackend + - Fixes for DB fields and management commands + + 0.8.0 + ----- + - compatibility code moved to compat.py + - ``generate_secret_key`` management command. 
+ - Fix relating to https://code.djangoproject.com/ticket/17627, package name change. + - Pass form fields with HiddenInput widget through render_form_field + - string.format usage / other refactoring / more support for Python 3 + + + 0.7.0 + ----- + - PEP8 codebase cleanup. + - Improved python3 support. + - Django 1.8 support. + + 0.6.4 + ----- + - Added python3 support. + + 0.6.3 + ----- + - Changed mimetype to content_type in class JsonResponse to reflect Django 1.7 deprecation. + + 0.6.2 + ----- + - Django 1.7 compatibility using simplejson as fallback + + + 0.6.1 + ----- + - Added support for attaching content to emails manually (without providing path to file). + + - Added LoginRequiredMixin + + + 0.6 + --- + - Added support for Django 1.5 + + - Added fixes in nested inlines + + - Added support for a multi-select checkbox field template and radio button in render_form_field + + - Added Test Email Backend to overwrite TO, CC and BCC fields in all outgoing emails + + - Added Custom File Email Backend to save emails as files with a custom extension + + - Rewrote fragments to be Bootstrap-compatible + + + 0.5.1 + ----- + + - root_path deprecated in Django 1.4+ + + + 0.5 + --- + + - Added self.get_inline_instances() usages instead of self.inline_instances + + - Changed minimum requirement to Django 1.4+ because of the above. + + + 0.4 + --- + + - Added nested inline templates, js and full ajax support. Now we can add/remove nested fields dynamically. + + - JsonpResponse object for padded JSON + + - User time tracking feature - how long the user has been on site, associated middleware etc. + + - @anonymous_required decorator: for views that should not be accessed by a logged-in user. + + - Added EncryptedTextField and EncryptedCharField + + - Misc. bug fixes +Keywords: django +Platform: UNKNOWN +Classifier: Framework :: Django +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..2dcbfef --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,44 @@ +AUTHORS +LICENSE +MANIFEST.in +README.rst +setup.cfg +setup.py +django_common/__init__.py +django_common/admin.py +django_common/auth_backends.py +django_common/classmaker.py +django_common/compat.py +django_common/context_processors.py +django_common/db_fields.py +django_common/decorators.py +django_common/email_backends.py +django_common/helper.py +django_common/http.py +django_common/middleware.py +django_common/mixin.py +django_common/scaffold.py +django_common/session.py +django_common/settings.py +django_common/tests.py +django_common/tzinfo.py +django_common/management/__init__.py +django_common/management/commands/__init__.py +django_common/management/commands/generate_secret_key.py +django_common/management/commands/scaffold.py +django_common/static/django_common/js/ajax_form.js +django_common/static/django_common/js/common.js +django_common/templates/common/admin/nested.html +django_common/templates/common/admin/nested_tabular.html +django_common/templates/common/fragments/checkbox_field.html +django_common/templates/common/fragments/form_field.html 
+django_common/templates/common/fragments/multi_checkbox_field.html +django_common/templates/common/fragments/radio_field.html +django_common/templatetags/__init__.py +django_common/templatetags/custom_tags.py +django_common_helpers.egg-info/PKG-INFO +django_common_helpers.egg-info/SOURCES.txt +django_common_helpers.egg-info/dependency_links.txt +django_common_helpers.egg-info/not-zip-safe +django_common_helpers.egg-info/requires.txt +django_common_helpers.egg-info/top_level.txt \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..f3a85d9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/installed-files.txt @@ -0,0 +1,62 @@ +../django_common/__init__.py +../django_common/__pycache__/__init__.cpython-36.pyc +../django_common/__pycache__/admin.cpython-36.pyc +../django_common/__pycache__/auth_backends.cpython-36.pyc +../django_common/__pycache__/classmaker.cpython-36.pyc +../django_common/__pycache__/compat.cpython-36.pyc +../django_common/__pycache__/context_processors.cpython-36.pyc +../django_common/__pycache__/db_fields.cpython-36.pyc +../django_common/__pycache__/decorators.cpython-36.pyc +../django_common/__pycache__/email_backends.cpython-36.pyc +../django_common/__pycache__/helper.cpython-36.pyc +../django_common/__pycache__/http.cpython-36.pyc +../django_common/__pycache__/middleware.cpython-36.pyc +../django_common/__pycache__/mixin.cpython-36.pyc +../django_common/__pycache__/scaffold.cpython-36.pyc +../django_common/__pycache__/session.cpython-36.pyc +../django_common/__pycache__/settings.cpython-36.pyc +../django_common/__pycache__/tests.cpython-36.pyc +../django_common/__pycache__/tzinfo.cpython-36.pyc +../django_common/admin.py +../django_common/auth_backends.py +../django_common/classmaker.py +../django_common/compat.py +../django_common/context_processors.py +../django_common/db_fields.py +../django_common/decorators.py +../django_common/email_backends.py +../django_common/helper.py +../django_common/http.py +../django_common/management/__init__.py +../django_common/management/__pycache__/__init__.cpython-36.pyc +../django_common/management/commands/__init__.py +../django_common/management/commands/__pycache__/__init__.cpython-36.pyc +../django_common/management/commands/__pycache__/generate_secret_key.cpython-36.pyc +../django_common/management/commands/__pycache__/scaffold.cpython-36.pyc +../django_common/management/commands/generate_secret_key.py +../django_common/management/commands/scaffold.py +../django_common/middleware.py +../django_common/mixin.py +../django_common/scaffold.py +../django_common/session.py +../django_common/settings.py +../django_common/static/django_common/js/ajax_form.js +../django_common/static/django_common/js/common.js +../django_common/templates/common/admin/nested.html +../django_common/templates/common/admin/nested_tabular.html 
+../django_common/templates/common/fragments/checkbox_field.html
+../django_common/templates/common/fragments/form_field.html
+../django_common/templates/common/fragments/multi_checkbox_field.html
+../django_common/templates/common/fragments/radio_field.html
+../django_common/templatetags/__init__.py
+../django_common/templatetags/__pycache__/__init__.cpython-36.pyc
+../django_common/templatetags/__pycache__/custom_tags.cpython-36.pyc
+../django_common/templatetags/custom_tags.py
+../django_common/tests.py
+../django_common/tzinfo.py
+PKG-INFO
+SOURCES.txt
+dependency_links.txt
+not-zip-safe
+requires.txt
+top_level.txt
diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/not-zip-safe b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/requires.txt b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/requires.txt
new file mode 100644
index 0000000..f97e3c8
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/requires.txt
@@ -0,0 +1 @@
+Django>=1.8.0
diff --git a/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/top_level.txt
new file mode 100644
index 0000000..8951166
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_common_helpers-0.9.2-py3.6.egg-info/top_level.txt
@@ -0,0 +1 @@
+django_common
diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/PKG-INFO
new file mode 100644
index 0000000..db3973d
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/PKG-INFO
@@ -0,0 +1,40 @@
+Metadata-Version: 1.1
+Name: django-cron
+Version: 0.5.1
+Summary: Running Python crons in a Django project
+Home-page: http://github.com/tivix/django-cron
+Author: Sumit Chachra
+Author-email: chachra@tivix.com
+License: UNKNOWN
+Description: ===========
+        django-cron
+        ===========
+
+        .. image:: https://travis-ci.org/Tivix/django-cron.png
+           :target: https://travis-ci.org/Tivix/django-cron
+
+
+        .. image:: https://coveralls.io/repos/Tivix/django-cron/badge.png
+           :target: https://coveralls.io/r/Tivix/django-cron?branch=master
+
+
+        .. image:: https://readthedocs.org/projects/django-cron/badge/?version=latest
+           :target: https://readthedocs.org/projects/django-cron/?badge=latest
+
+        Django-cron lets you run Django/Python code on a recurring basis, providing basic plumbing to track and execute tasks. The two most common approaches are writing custom Python scripts or one management command per cron (which leads to too many management commands!). Along with that, some mechanism to track success, failure, etc. is also usually necessary.
+
+        This app solves both issues to a reasonable extent. This is by no means a replacement for queues like Celery ( http://celeryproject.org/ ) etc.
+
+
+        Documentation
+        =============
+        http://django-cron.readthedocs.org/en/latest/
+
+        This open-source app is brought to you by Tivix, Inc.
( http://tivix.com/ ) +Keywords: django cron +Platform: UNKNOWN +Classifier: Framework :: Django +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..e0dccf3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,29 @@ +AUTHORS +LICENSE +MANIFEST.in +README.rst +setup.cfg +setup.py +django_cron/__init__.py +django_cron/admin.py +django_cron/cron.py +django_cron/helpers.py +django_cron/models.py +django_cron/tests.py +django_cron.egg-info/PKG-INFO +django_cron.egg-info/SOURCES.txt +django_cron.egg-info/dependency_links.txt +django_cron.egg-info/not-zip-safe +django_cron.egg-info/requires.txt +django_cron.egg-info/top_level.txt +django_cron/backends/__init__.py +django_cron/backends/lock/__init__.py +django_cron/backends/lock/base.py +django_cron/backends/lock/cache.py +django_cron/backends/lock/file.py +django_cron/management/__init__.py +django_cron/management/commands/__init__.py +django_cron/management/commands/runcrons.py +django_cron/migrations/0001_initial.py +django_cron/migrations/0002_remove_max_length_from_CronJobLog_message.py +django_cron/migrations/__init__.py \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..bba35da --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/installed-files.txt @@ -0,0 +1,40 @@ +../django_cron/__init__.py +../django_cron/__pycache__/__init__.cpython-36.pyc +../django_cron/__pycache__/admin.cpython-36.pyc +../django_cron/__pycache__/cron.cpython-36.pyc +../django_cron/__pycache__/helpers.cpython-36.pyc +../django_cron/__pycache__/models.cpython-36.pyc +../django_cron/__pycache__/tests.cpython-36.pyc +../django_cron/admin.py +../django_cron/backends/__init__.py +../django_cron/backends/__pycache__/__init__.cpython-36.pyc +../django_cron/backends/lock/__init__.py +../django_cron/backends/lock/__pycache__/__init__.cpython-36.pyc +../django_cron/backends/lock/__pycache__/base.cpython-36.pyc +../django_cron/backends/lock/__pycache__/cache.cpython-36.pyc +../django_cron/backends/lock/__pycache__/file.cpython-36.pyc +../django_cron/backends/lock/base.py +../django_cron/backends/lock/cache.py +../django_cron/backends/lock/file.py +../django_cron/cron.py +../django_cron/helpers.py +../django_cron/management/__init__.py +../django_cron/management/__pycache__/__init__.cpython-36.pyc +../django_cron/management/commands/__init__.py +../django_cron/management/commands/__pycache__/__init__.cpython-36.pyc +../django_cron/management/commands/__pycache__/runcrons.cpython-36.pyc +../django_cron/management/commands/runcrons.py 
+../django_cron/migrations/0001_initial.py +../django_cron/migrations/0002_remove_max_length_from_CronJobLog_message.py +../django_cron/migrations/__init__.py +../django_cron/migrations/__pycache__/0001_initial.cpython-36.pyc +../django_cron/migrations/__pycache__/0002_remove_max_length_from_CronJobLog_message.cpython-36.pyc +../django_cron/migrations/__pycache__/__init__.cpython-36.pyc +../django_cron/models.py +../django_cron/tests.py +PKG-INFO +SOURCES.txt +dependency_links.txt +not-zip-safe +requires.txt +top_level.txt diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/not-zip-safe b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/requires.txt b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/requires.txt new file mode 100644 index 0000000..4c78d87 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/requires.txt @@ -0,0 +1,2 @@ +Django>=1.8.0 +django-common-helpers>=0.6.4 diff --git a/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/top_level.txt new file mode 100644 index 0000000..235320b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron-0.5.1-py3.6.egg-info/top_level.txt @@ -0,0 +1 @@ +django_cron diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/__init__.py new file mode 100644 index 0000000..95be4c0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/__init__.py @@ -0,0 +1,234 @@ +import logging +from datetime import timedelta +import traceback +import time + +from django.conf import settings +from django.utils.timezone import now as utc_now, localtime, is_naive +from django.db.models import Q + + +DEFAULT_LOCK_BACKEND = 'django_cron.backends.lock.cache.CacheLock' +logger = logging.getLogger('django_cron') + + +def get_class(kls): + """ + TODO: move to django-common app. + Converts a string to a class. + Courtesy: http://stackoverflow.com/questions/452969/does-python-have-an-equivalent-to-java-class-forname/452981#452981 + """ + parts = kls.split('.') + module = ".".join(parts[:-1]) + m = __import__(module) + for comp in parts[1:]: + m = getattr(m, comp) + return m + + +def get_current_time(): + now = utc_now() + return now if is_naive(now) else localtime(now) + + +class Schedule(object): + def __init__(self, run_every_mins=None, run_at_times=None, retry_after_failure_mins=None): + if run_at_times is None: + run_at_times = [] + self.run_every_mins = run_every_mins + self.run_at_times = run_at_times + self.retry_after_failure_mins = retry_after_failure_mins + + +class CronJobBase(object): + """ + Sub-classes should have the following properties: + + code - This should be a code specific to the cron being run. Eg. 'general.stats' etc. 
+
+    schedule
+
+    Following functions:
+
+    do - This is the actual business logic to be run at the given schedule
+    """
+    def __init__(self):
+        self.prev_success_cron = None
+
+    def set_prev_success_cron(self, prev_success_cron):
+        self.prev_success_cron = prev_success_cron
+
+    def get_prev_success_cron(self):
+        return self.prev_success_cron
+
+    @classmethod
+    def get_time_until_run(cls):
+        from django_cron.models import CronJobLog
+        try:
+            last_job = CronJobLog.objects.filter(
+                code=cls.code).latest('start_time')
+        except CronJobLog.DoesNotExist:
+            return timedelta()
+        return (last_job.start_time +
+                timedelta(minutes=cls.schedule.run_every_mins) - utc_now())
+
+
+class CronJobManager(object):
+    """
+    A manager instance should be created per cron job to be run.
+    Does all the log tracking etc. for it.
+    Used as a context manager via the 'with' statement to ensure
+    proper logging in case of job failure.
+    """
+
+    def __init__(self, cron_job_class, silent=False, *args, **kwargs):
+        super(CronJobManager, self).__init__(*args, **kwargs)
+
+        self.cron_job_class = cron_job_class
+        self.silent = silent
+        self.lock_class = self.get_lock_class()
+        self.previously_ran_successful_cron = None
+
+    def should_run_now(self, force=False):
+        """
+        Returns a boolean determining whether this cron should run now or not.
+        """
+        from django_cron.models import CronJobLog
+        cron_job = self.cron_job
+
+        self.user_time = None
+        self.previously_ran_successful_cron = None
+
+        # If the --force option was passed, always run the cron
+        if force:
+            return True
+        if cron_job.schedule.run_every_mins is not None:
+
+            # Check the last job - successful or not
+            last_job = None
+            try:
+                last_job = CronJobLog.objects.filter(code=cron_job.code).latest('start_time')
+            except CronJobLog.DoesNotExist:
+                pass
+            if last_job:
+                if not last_job.is_success and cron_job.schedule.retry_after_failure_mins:
+                    if get_current_time() > last_job.start_time + timedelta(minutes=cron_job.schedule.retry_after_failure_mins):
+                        return True
+                    else:
+                        return False
+
+            try:
+                self.previously_ran_successful_cron = CronJobLog.objects.filter(
+                    code=cron_job.code,
+                    is_success=True,
+                    ran_at_time__isnull=True
+                ).latest('start_time')
+            except CronJobLog.DoesNotExist:
+                pass
+
+            if self.previously_ran_successful_cron:
+                if get_current_time() > self.previously_ran_successful_cron.start_time + timedelta(minutes=cron_job.schedule.run_every_mins):
+                    return True
+            else:
+                return True
+
+        if cron_job.schedule.run_at_times:
+            for time_data in cron_job.schedule.run_at_times:
+                user_time = time.strptime(time_data, "%H:%M")
+                now = get_current_time()
+                actual_time = time.strptime("%s:%s" % (now.hour, now.minute), "%H:%M")
+                if actual_time >= user_time:
+                    qset = CronJobLog.objects.filter(
+                        code=cron_job.code,
+                        ran_at_time=time_data,
+                        is_success=True
+                    ).filter(
+                        Q(start_time__gt=now) | Q(end_time__gte=now.replace(hour=0, minute=0, second=0, microsecond=0))
+                    )
+                    if not qset:
+                        self.user_time = time_data
+                        return True
+
+        return False
+
+    def make_log(self, *messages, **kwargs):
+        cron_log = self.cron_log
+
+        cron_job = getattr(self, 'cron_job', self.cron_job_class)
+        cron_log.code = cron_job.code
+
+        cron_log.is_success = kwargs.get('success', True)
+        cron_log.message = self.make_log_msg(*messages)
+        cron_log.ran_at_time = getattr(self, 'user_time', None)
+        cron_log.end_time = get_current_time()
+        cron_log.save()
+
+    def make_log_msg(self, msg, *other_messages):
+        MAX_MESSAGE_LENGTH = 1000
+        if not other_messages:
+            # assume that msg is a single string
+            return
msg[-MAX_MESSAGE_LENGTH:] + else: + if len(msg): + msg += "\n...\n" + NEXT_MESSAGE_OFFSET = MAX_MESSAGE_LENGTH - len(msg) + else: + NEXT_MESSAGE_OFFSET = MAX_MESSAGE_LENGTH + + if NEXT_MESSAGE_OFFSET > 0: + msg += other_messages[0][-NEXT_MESSAGE_OFFSET:] + return self.make_log_msg(msg, *other_messages[1:]) + else: + return self.make_log_msg(msg) + + def __enter__(self): + from django_cron.models import CronJobLog + self.cron_log = CronJobLog(start_time=get_current_time()) + + return self + + def __exit__(self, ex_type, ex_value, ex_traceback): + if ex_type == self.lock_class.LockFailedException: + if not self.silent: + logger.info(ex_value) + + elif ex_type is not None: + try: + trace = "".join(traceback.format_exception(ex_type, ex_value, ex_traceback)) + self.make_log(self.msg, trace, success=False) + except Exception as e: + err_msg = "Error saving cronjob log message: %s" % e + logger.error(err_msg) + + return True # prevent exception propagation + + def run(self, force=False): + """ + apply the logic of the schedule and call do() on the CronJobBase class + """ + cron_job_class = self.cron_job_class + if not issubclass(cron_job_class, CronJobBase): + raise Exception('The cron_job to be run must be a subclass of %s' % CronJobBase.__name__) + + with self.lock_class(cron_job_class, self.silent): + self.cron_job = cron_job_class() + + if self.should_run_now(force): + logger.debug("Running cron: %s code %s", cron_job_class.__name__, self.cron_job.code) + self.msg = self.cron_job.do() + self.make_log(self.msg, success=True) + self.cron_job.set_prev_success_cron(self.previously_ran_successful_cron) + + def get_lock_class(self): + name = getattr(settings, 'DJANGO_CRON_LOCK_BACKEND', DEFAULT_LOCK_BACKEND) + try: + return get_class(name) + except Exception as err: + raise Exception("invalid lock module %s. Can't use it: %s." 
% (name, err)) + + @property + def msg(self): + return getattr(self, '_msg', '') + + @msg.setter + def msg(self, msg): + if msg is None: + msg = '' + self._msg = msg diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/admin.py b/thesisenv/lib/python3.6/site-packages/django_cron/admin.py new file mode 100644 index 0000000..af70b5a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/admin.py @@ -0,0 +1,56 @@ +from datetime import timedelta + +from django.contrib import admin +from django.db.models import F +from django.utils.translation import ugettext_lazy as _ + +from django_cron.models import CronJobLog +from django_cron.helpers import humanize_duration + + +class DurationFilter(admin.SimpleListFilter): + title = _('duration') + parameter_name = 'duration' + + def lookups(self, request, model_admin): + return ( + ('lte_minute', _('<= 1 minute')), + ('gt_minute', _('> 1 minute')), + ('gt_hour', _('> 1 hour')), + ('gt_day', _('> 1 day')), + ) + + def queryset(self, request, queryset): + if self.value() == 'lte_minute': + return queryset.filter(end_time__lte=F('start_time') + timedelta(minutes=1)) + if self.value() == 'gt_minute': + return queryset.filter(end_time__gt=F('start_time') + timedelta(minutes=1)) + if self.value() == 'gt_hour': + return queryset.filter(end_time__gt=F('start_time') + timedelta(hours=1)) + if self.value() == 'gt_day': + return queryset.filter(end_time__gt=F('start_time') + timedelta(days=1)) + + +class CronJobLogAdmin(admin.ModelAdmin): + class Meta: + model = CronJobLog + + search_fields = ('code', 'message') + ordering = ('-start_time',) + list_display = ('code', 'start_time', 'end_time', 'humanize_duration', 'is_success') + list_filter = ('code', 'start_time', 'is_success', DurationFilter) + + def get_readonly_fields(self, request, obj=None): + if not request.user.is_superuser and obj is not None: + names = [f.name for f in CronJobLog._meta.fields if f.name != 'id'] + return self.readonly_fields + tuple(names) + return self.readonly_fields + + def humanize_duration(self, obj): + return humanize_duration(obj.end_time - obj.start_time) + + humanize_duration.short_description = _("Duration") + humanize_duration.admin_order_field = 'duration' + + +admin.site.register(CronJobLog, CronJobLogAdmin) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/backends/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_cron/backends/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/base.py b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/base.py new file mode 100644 index 0000000..f34f732 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/base.py @@ -0,0 +1,68 @@ +class DjangoCronJobLock(object): + """ + The lock class to use in runcrons management command. 
+    Intended usage is
+    with CacheLock(cron_class, silent):
+        do work
+    or inside try - except:
+    try:
+        with CacheLock(cron_class, silent):
+            do work
+    except DjangoCronJobLock.LockFailedException:
+        pass
+    """
+    class LockFailedException(Exception):
+        pass
+
+    def __init__(self, cron_class, silent, *args, **kwargs):
+        """
+        This method initializes the class.
+        You should take care of getting everything
+        necessary from the input parameters here.
+        The base class processes
+        * self.job_name
+        * self.job_code
+        * self.parallel
+        * self.silent
+        for you. The rest is backend-specific.
+        """
+        self.job_name = cron_class.__name__
+        self.job_code = cron_class.code
+        self.parallel = getattr(cron_class, 'ALLOW_PARALLEL_RUNS', False)
+        self.silent = silent
+
+    def lock(self):
+        """
+        This method is called to acquire the lock; typically it will
+        be called from the __enter__ method.
+        Return True on success,
+        False on failure.
+        Here you can optionally call self.notice_lock_failed().
+        """
+        raise NotImplementedError(
+            'You have to implement lock(self) method for your class'
+        )
+
+    def release(self):
+        """
+        This method is called to release the lock.
+        Typically called from the __exit__ method.
+        No need to return anything currently.
+        """
+        raise NotImplementedError(
+            'You have to implement release(self) method for your class'
+        )
+
+    def lock_failed_message(self):
+        return "%s: lock found. Will try later." % self.job_name
+
+    def __enter__(self):
+        if self.parallel:
+            return
+        else:
+            if not self.lock():
+                raise self.LockFailedException(self.lock_failed_message())
+
+    def __exit__(self, type, value, traceback):
+        if not self.parallel:
+            self.release()
diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/cache.py b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/cache.py
new file mode 100644
index 0000000..4e92859
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/cache.py
@@ -0,0 +1,73 @@
+from django.conf import settings
+from django.core.cache import caches
+from django.utils import timezone
+
+from django_cron.backends.lock.base import DjangoCronJobLock
+
+
+class CacheLock(DjangoCronJobLock):
+    """
+    One of the simplest lock backends; uses the Django cache to
+    prevent parallel runs of commands.
+    """
+    DEFAULT_LOCK_TIME = 24 * 60 * 60  # 24 hours
+
+    def __init__(self, cron_class, *args, **kwargs):
+        super(CacheLock, self).__init__(cron_class, *args, **kwargs)
+
+        self.cache = self.get_cache_by_name()
+        self.lock_name = self.get_lock_name()
+        self.timeout = self.get_cache_timeout(cron_class)
+
+    def lock(self):
+        """
+        This method sets a cache variable to mark the current job as "already running".
+        """
+        if self.cache.get(self.lock_name):
+            return False
+        else:
+            self.cache.set(self.lock_name, timezone.now(), self.timeout)
+            return True
+
+    def release(self):
+        self.cache.delete(self.lock_name)
+
+    def lock_failed_message(self):
+        started = self.get_running_lock_date()
+        msgs = [
+            "%s: lock has been found. Other cron started at %s" % (
+                self.job_name, started
+            ),
+            "Current timeout for job %s is %s seconds (cache key name is '%s')."
% (
+                self.job_name, self.timeout, self.lock_name
+            )
+        ]
+        return msgs
+
+    def get_cache_by_name(self):
+        """
+        Gets the specified cache (or the `default` cache if DJANGO_CRON_CACHE is not set)
+        """
+        cache_name = getattr(settings, 'DJANGO_CRON_CACHE', 'default')
+
+        # Allow the possible InvalidCacheBackendError to happen here
+        # instead of allowing unexpected parallel runs of cron jobs
+        return caches[cache_name]
+
+    def get_lock_name(self):
+        return self.job_name
+
+    def get_cache_timeout(self, cron_class):
+        timeout = self.DEFAULT_LOCK_TIME
+        try:
+            timeout = getattr(cron_class, 'DJANGO_CRON_LOCK_TIME', settings.DJANGO_CRON_LOCK_TIME)
+        except:
+            pass
+        return timeout
+
+    def get_running_lock_date(self):
+        date = self.cache.get(self.lock_name)
+        if date and not timezone.is_aware(date):
+            tz = timezone.get_current_timezone()
+            date = timezone.make_aware(date, tz)
+        return date
diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/file.py b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/file.py
new file mode 100644
index 0000000..7c54bef
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_cron/backends/lock/file.py
@@ -0,0 +1,68 @@
+import os
+import sys
+import errno
+
+from django.conf import settings
+from django.core.files import locks
+
+from django_cron.backends.lock.base import DjangoCronJobLock
+
+
+class FileLock(DjangoCronJobLock):
+    """
+    Quite a simple lock backend that uses a kind of pid file.
+    """
+    def lock(self):
+        try:
+            lock_name = self.get_lock_name()
+            # need a loop to avoid races on file unlinking
+            while True:
+                f = open(lock_name, 'wb+', 0)
+                locks.lock(f, locks.LOCK_EX | locks.LOCK_NB)
+                # Here is the race:
+                # Previous process "A" is still running. Process "B" opens
+                # the file and then process "A" finishes and deletes it.
+                # "B" locks the deleted file (via the fd it already has) and
+                # runs, then the next process "C" creates a _new_ file and
+                # locks it successfully while "B" is still running.
+                # We just need to check that "B" didn't lock a deleted file
+                # to avoid any problems. If process "C" has locked
+                # a new file while "B" stats it, that's fine: let "B" quit and
+                # "C" run. We can still meet an attacker that permanently
+                # creates and deletes our file, but we can't avoid problems
+                # in that case.
+                if os.path.isfile(lock_name):
+                    st1 = os.fstat(f.fileno())
+                    st2 = os.stat(lock_name)
+                    if st1.st_ino == st2.st_ino:
+                        f.write(bytes(str(os.getpid()).encode('utf-8')))
+                        self.lockfile = f
+                        return True
+                # else:
+                #     retry. Don't unlink, the next process might already use it.
+ f.close() + + except IOError as e: + if e.errno in (errno.EACCES, errno.EAGAIN): + return False + else: + e = sys.exc_info()[1] + raise e + # TODO: perhaps on windows I need to catch different exception type + + def release(self): + f = self.lockfile + # unlink before release lock to avoid race + # see comment in self.lock for description + os.unlink(f.name) + f.close() + + def get_lock_name(self): + default_path = '/tmp' + path = getattr(settings, 'DJANGO_CRON_LOCKFILE_PATH', default_path) + if not os.path.isdir(path): + # let it die if failed, can't run further anyway + os.makedirs(path) + + filename = self.job_name + '.lock' + return os.path.join(path, filename) diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/cron.py b/thesisenv/lib/python3.6/site-packages/django_cron/cron.py new file mode 100644 index 0000000..944d1b6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/cron.py @@ -0,0 +1,46 @@ +from django.conf import settings + +from django_common.helper import send_mail + +from django_cron import CronJobBase, Schedule, get_class +from django_cron.models import CronJobLog + + +class FailedRunsNotificationCronJob(CronJobBase): + """ + Send email if cron failed to run X times in a row + """ + RUN_EVERY_MINS = 30 + + schedule = Schedule(run_every_mins=RUN_EVERY_MINS) + code = 'django_cron.FailedRunsNotificationCronJob' + + def do(self): + + crons_to_check = [get_class(x) for x in settings.CRON_CLASSES] + emails = [admin[1] for admin in settings.ADMINS] + + failed_runs_cronjob_email_prefix = getattr(settings, 'FAILED_RUNS_CRONJOB_EMAIL_PREFIX', '') + + for cron in crons_to_check: + + min_failures = getattr(cron, 'MIN_NUM_FAILURES', 10) + jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures] + failures = 0 + message = '' + + for job in jobs: + if not job.is_success: + failures += 1 + message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message) + + if failures >= min_failures: + send_mail( + '%s%s failed %s times in a row!' 
% ( + failed_runs_cronjob_email_prefix, + cron.code, + min_failures, + ), + message, + settings.DEFAULT_FROM_EMAIL, emails + ) diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/helpers.py b/thesisenv/lib/python3.6/site-packages/django_cron/helpers.py new file mode 100644 index 0000000..5b40ac4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/helpers.py @@ -0,0 +1,29 @@ +from django.utils.translation import ugettext as _ +from django.template.defaultfilters import pluralize + + +def humanize_duration(duration): + """ + Returns a humanized string representing time difference + + For example: 2 days 1 hour 25 minutes 10 seconds + """ + days = duration.days + hours = int(duration.seconds / 3600) + minutes = int(duration.seconds % 3600 / 60) + seconds = int(duration.seconds % 3600 % 60) + + parts = [] + if days > 0: + parts.append(u'%s %s' % (days, pluralize(days, _('day,days')))) + + if hours > 0: + parts.append(u'%s %s' % (hours, pluralize(hours, _('hour,hours')))) + + if minutes > 0: + parts.append(u'%s %s' % (minutes, pluralize(minutes, _('minute,minutes')))) + + if seconds > 0: + parts.append(u'%s %s' % (seconds, pluralize(seconds, _('second,seconds')))) + + return ', '.join(parts) if len(parts) != 0 else _('< 1 second') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/management/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_cron/management/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/management/commands/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/__init__.py rename to thesisenv/lib/python3.6/site-packages/django_cron/management/commands/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/management/commands/runcrons.py b/thesisenv/lib/python3.6/site-packages/django_cron/management/commands/runcrons.py new file mode 100644 index 0000000..2e506ba --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/management/commands/runcrons.py @@ -0,0 +1,80 @@ +import traceback +from datetime import timedelta + +from django.core.management.base import BaseCommand +from django.conf import settings +from django.db import close_old_connections + +from django_cron import CronJobManager, get_class, get_current_time +from django_cron.models import CronJobLog + + +DEFAULT_LOCK_TIME = 24 * 60 * 60 # 24 hours + + +class Command(BaseCommand): + def add_arguments(self, parser): + parser.add_argument( + 'cron_classes', + nargs='*' + ) + parser.add_argument( + '--force', + action='store_true', + help='Force cron runs' + ) + parser.add_argument( + '--silent', + action='store_true', + help='Do not push any message on console' + ) + + def handle(self, *args, **options): + """ + Iterates over all the CRON_CLASSES (or if passed in as a commandline argument) + and runs them. 
+ """ + cron_classes = options['cron_classes'] + if cron_classes: + cron_class_names = cron_classes + else: + cron_class_names = getattr(settings, 'CRON_CLASSES', []) + + try: + crons_to_run = [get_class(x) for x in cron_class_names] + except Exception: + error = traceback.format_exc() + self.stdout.write('Make sure these are valid cron class names: %s\n%s' % (cron_class_names, error)) + return + + for cron_class in crons_to_run: + run_cron_with_cache_check( + cron_class, + force=options['force'], + silent=options['silent'] + ) + + clear_old_log_entries() + close_old_connections() + + +def run_cron_with_cache_check(cron_class, force=False, silent=False): + """ + Checks the cache and runs the cron or not. + + @cron_class - cron class to run. + @force - run job even if not scheduled + @silent - suppress notifications + """ + + with CronJobManager(cron_class, silent) as manager: + manager.run(force) + + +def clear_old_log_entries(): + """ + Removes older log entries, if the appropriate setting has been set + """ + if hasattr(settings, 'DJANGO_CRON_DELETE_LOGS_OLDER_THAN'): + delta = timedelta(days=settings.DJANGO_CRON_DELETE_LOGS_OLDER_THAN) + CronJobLog.objects.filter(end_time__lt=get_current_time() - delta).delete() diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0001_initial.py new file mode 100644 index 0000000..4f10227 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0001_initial.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='CronJobLog', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('code', models.CharField(max_length=64, db_index=True)), + ('start_time', models.DateTimeField(db_index=True)), + ('end_time', models.DateTimeField(db_index=True)), + ('is_success', models.BooleanField(default=False)), + ('message', models.TextField(max_length=1000, blank=True)), + ('ran_at_time', models.TimeField(db_index=True, null=True, editable=False, blank=True)), + ], + ), + migrations.AlterIndexTogether( + name='cronjoblog', + index_together=set([('code', 'is_success', 'ran_at_time'), ('code', 'start_time', 'ran_at_time'), ('code', 'start_time')]), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0002_remove_max_length_from_CronJobLog_message.py b/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0002_remove_max_length_from_CronJobLog_message.py new file mode 100644 index 0000000..0e16622 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/migrations/0002_remove_max_length_from_CronJobLog_message.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('django_cron', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='cronjoblog', + name='message', + field=models.TextField(default='', blank=True), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/django_cron/migrations/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/contrib/__init__.py rename 
to thesisenv/lib/python3.6/site-packages/django_cron/migrations/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/models.py b/thesisenv/lib/python3.6/site-packages/django_cron/models.py new file mode 100644 index 0000000..73e093a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/models.py @@ -0,0 +1,28 @@ +from django.db import models + + +class CronJobLog(models.Model): + """ + Keeps track of the cron jobs that ran etc. and any error + messages if they failed. + """ + code = models.CharField(max_length=64, db_index=True) + start_time = models.DateTimeField(db_index=True) + end_time = models.DateTimeField(db_index=True) + is_success = models.BooleanField(default=False) + message = models.TextField(default='', blank=True) # TODO: db_index=True + + # This field is used to mark jobs executed in exact time. + # Jobs that run every X minutes, have this field empty. + ran_at_time = models.TimeField(null=True, blank=True, db_index=True, editable=False) + + def __unicode__(self): + return '%s (%s)' % (self.code, 'Success' if self.is_success else 'Fail') + + class Meta: + index_together = [ + ('code', 'is_success', 'ran_at_time'), + ('code', 'start_time', 'ran_at_time'), + ('code', 'start_time') # useful when finding latest run (order by start_time) of cron + ] + app_label = 'django_cron' diff --git a/thesisenv/lib/python3.6/site-packages/django_cron/tests.py b/thesisenv/lib/python3.6/site-packages/django_cron/tests.py new file mode 100644 index 0000000..1ec872c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_cron/tests.py @@ -0,0 +1,181 @@ +import threading +from time import sleep +from datetime import timedelta + +from django import db +from django.test import TransactionTestCase +from django.core.management import call_command +from django.test.utils import override_settings +from django.test.client import Client +from django.urls import reverse +from django.contrib.auth.models import User + +from freezegun import freeze_time + +from django_cron.helpers import humanize_duration +from django_cron.models import CronJobLog + + +class OutBuffer(object): + content = [] + modified = False + _str_cache = '' + + def write(self, *args): + self.content.extend(args) + self.modified = True + + def str_content(self): + if self.modified: + self._str_cache = ''.join((str(x) for x in self.content)) + self.modified = False + + return self._str_cache + + +class TestCase(TransactionTestCase): + + success_cron = 'test_crons.TestSucessCronJob' + error_cron = 'test_crons.TestErrorCronJob' + five_mins_cron = 'test_crons.Test5minsCronJob' + run_at_times_cron = 'test_crons.TestRunAtTimesCronJob' + wait_3sec_cron = 'test_crons.Wait3secCronJob' + does_not_exist_cron = 'ThisCronObviouslyDoesntExist' + test_failed_runs_notification_cron = 'django_cron.cron.FailedRunsNotificationCronJob' + + def setUp(self): + CronJobLog.objects.all().delete() + + def test_success_cron(self): + logs_count = CronJobLog.objects.all().count() + call_command('runcrons', self.success_cron, force=True) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + def test_failed_cron(self): + logs_count = CronJobLog.objects.all().count() + call_command('runcrons', self.error_cron, force=True) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + def test_not_exists_cron(self): + logs_count = CronJobLog.objects.all().count() + out_buffer = OutBuffer() + call_command('runcrons', self.does_not_exist_cron, force=True, stdout=out_buffer) + + self.assertIn('Make 
sure these are valid cron class names', out_buffer.str_content()) + self.assertIn(self.does_not_exist_cron, out_buffer.str_content()) + self.assertEqual(CronJobLog.objects.all().count(), logs_count) + + @override_settings(DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock') + def test_file_locking_backend(self): + logs_count = CronJobLog.objects.all().count() + call_command('runcrons', self.success_cron, force=True) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + def test_runs_every_mins(self): + logs_count = CronJobLog.objects.all().count() + + with freeze_time("2014-01-01 00:00:00"): + call_command('runcrons', self.five_mins_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + with freeze_time("2014-01-01 00:04:59"): + call_command('runcrons', self.five_mins_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + with freeze_time("2014-01-01 00:05:01"): + call_command('runcrons', self.five_mins_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2) + + def test_runs_at_time(self): + logs_count = CronJobLog.objects.all().count() + with freeze_time("2014-01-01 00:00:01"): + call_command('runcrons', self.run_at_times_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + with freeze_time("2014-01-01 00:04:50"): + call_command('runcrons', self.run_at_times_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + with freeze_time("2014-01-01 00:05:01"): + call_command('runcrons', self.run_at_times_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2) + + def test_admin(self): + password = 'test' + user = User.objects.create_superuser( + 'test', + 'test@tivix.com', + password + ) + self.client = Client() + self.client.login(username=user.username, password=password) + + # edit CronJobLog object + call_command('runcrons', self.success_cron, force=True) + log = CronJobLog.objects.all()[0] + url = reverse('admin:django_cron_cronjoblog_change', args=(log.id,)) + response = self.client.get(url) + self.assertIn('Cron job logs', str(response.content)) + + def run_cronjob_in_thread(self, logs_count): + call_command('runcrons', self.wait_3sec_cron) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + db.close_old_connections() + + def test_cache_locking_backend(self): + """ + with cache locking backend + """ + logs_count = CronJobLog.objects.all().count() + t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,)) + t.daemon = True + t.start() + # this shouldn't get running + sleep(0.1) # to avoid race condition + call_command('runcrons', self.wait_3sec_cron) + t.join(10) + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1) + + # TODO: this test doesn't pass - seems that second cronjob is locking file + # however it should throw an exception that file is locked by other cronjob + # @override_settings( + # DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock', + # DJANGO_CRON_LOCKFILE_PATH=os.path.join(os.getcwd()) + # ) + # def test_file_locking_backend_in_thread(self): + # """ + # with file locking backend + # """ + # logs_count = CronJobLog.objects.all().count() + # t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,)) + # t.daemon = True + # t.start() + # # this shouldn't get running + # sleep(1) # to avoid race condition + # call_command('runcrons', self.wait_3sec_cron) + # t.join(10) + # self.assertEqual(CronJobLog.objects.all().count(), 
logs_count + 1) + + def test_failed_runs_notification(self): + CronJobLog.objects.all().delete() + logs_count = CronJobLog.objects.all().count() + + for i in range(10): + call_command('runcrons', self.error_cron, force=True) + call_command('runcrons', self.test_failed_runs_notification_cron) + + self.assertEqual(CronJobLog.objects.all().count(), logs_count + 11) + + def test_humanize_duration(self): + test_subjects = ( + (timedelta(days=1, hours=1, minutes=1, seconds=1), '1 day, 1 hour, 1 minute, 1 second'), + (timedelta(days=2), '2 days'), + (timedelta(days=15, minutes=4), '15 days, 4 minutes'), + (timedelta(), '< 1 second'), + ) + + for duration, humanized in test_subjects: + self.assertEqual( + humanize_duration(duration), + humanized + ) diff --git a/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..fc7832d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,654 @@ +================== +Django Post Office +================== + +Django Post Office is a simple app to send and manage your emails in Django. +Some awesome features are: + +* Allows you to send email asynchronously +* Multi backend support +* Supports HTML email +* Supports database based email templates +* Built in scheduling support +* Works well with task queues like `RQ `_ or `Celery `_ +* Uses multiprocessing (and threading) to send a large number of emails in parallel +* Supports multilingual email templates (i18n) + + +Dependencies +============ + +* `django >= 1.8 `_ +* `django-jsonfield `_ + + +Installation +============ + +|Build Status| + + +* Install from PyPI (or you `manually download from PyPI `_):: + + pip install django-post_office + +* Add ``post_office`` to your INSTALLED_APPS in django's ``settings.py``: + + .. code-block:: python + + INSTALLED_APPS = ( + # other apps + "post_office", + ) + +* Run ``migrate``:: + + python manage.py migrate + +* Set ``post_office.EmailBackend`` as your ``EMAIL_BACKEND`` in django's ``settings.py``: + + .. code-block:: python + + EMAIL_BACKEND = 'post_office.EmailBackend' + + +Quickstart +========== + +Send a simple email is really easy: + +.. code-block:: python + + from post_office import mail + + mail.send( + 'recipient@example.com', # List of email addresses also accepted + 'from@example.com', + subject='My email', + message='Hi there!', + html_message='Hi there!', + ) + + +If you want to use templates, ensure that Django's admin interface is enabled. Create an +``EmailTemplate`` instance via ``admin`` and do the following: + +.. code-block:: python + + from post_office import mail + + mail.send( + 'recipient@example.com', # List of email addresses also accepted + 'from@example.com', + template='welcome_email', # Could be an EmailTemplate instance or name + context={'foo': 'bar'}, + ) + +The above command will put your email on the queue so you can use the +command in your webapp without slowing down the request/response cycle too much. +To actually send them out, run ``python manage.py send_queued_mail``. +You can schedule this management command to run regularly via cron:: + + * * * * * (/usr/bin/python manage.py send_queued_mail >> send_mail.log 2>&1) + +or, if you use uWSGI_ as application server, add this short snipped to the +project's ``wsgi.py`` file: + +.. 
code-block:: python + + from django.core.wsgi import get_wsgi_application + + application = get_wsgi_application() + + # add this block of code + try: + import uwsgidecorators + from django.core.management import call_command + + @uwsgidecorators.timer(10) + def send_queued_mail(num): + """Send queued mail every 10 seconds""" + call_command('send_queued_mail', processes=1) + + except ImportError: + print("uwsgidecorators not found. Cron and timers are disabled") + +Alternatively you can also use the decorator ``@uwsgidecorators.cron(minute, hour, day, month, weekday)``. +This will schedule a task at specific times. Use ``-1`` to signal any time, it corresponds to the ``*`` +in cron. + +Please note that ``uwsgidecorators`` are available only, if the application has been started +with **uWSGI**. However, Django's internal ``./manange.py runserver`` also access this file, +therefore wrap the block into an exception handler as shown above. + +This configuration is very useful in environments, such as Docker containers, where you +don't have a running cron-daemon. + + +Usage +===== + +mail.send() +----------- + +``mail.send`` is the most important function in this library, it takes these +arguments: + ++--------------------+----------+--------------------------------------------------+ +| Argument | Required | Description | ++--------------------+----------+--------------------------------------------------+ +| recipients | Yes | list of recipient email addresses | ++--------------------+----------+--------------------------------------------------+ +| sender | No | Defaults to ``settings.DEFAULT_FROM_EMAIL``, | +| | | display name is allowed (``John ``) | ++--------------------+----------+--------------------------------------------------+ +| subject | No | Email subject (if ``template`` is not specified) | ++--------------------+----------+--------------------------------------------------+ +| message | No | Email content (if ``template`` is not specified) | ++--------------------+----------+--------------------------------------------------+ +| html_message | No | HTML content (if ``template`` is not specified) | ++--------------------+----------+--------------------------------------------------+ +| template | No | ``EmailTemplate`` instance or name | ++--------------------+----------+--------------------------------------------------+ +| language | No | Language in which you want to send the email in | +| | | (if you have multilingual email templates.) 
| ++--------------------+----------+--------------------------------------------------+ +| cc | No | list emails, will appear in ``cc`` field | ++--------------------+----------+--------------------------------------------------+ +| bcc | No | list of emails, will appear in `bcc` field | ++--------------------+----------+--------------------------------------------------+ +| attachments | No | Email attachments - A dictionary where the keys | +| | | are the filenames and the values are either: | +| | | | +| | | * files | +| | | * file-like objects | +| | | * full path of the file | ++--------------------+----------+--------------------------------------------------+ +| context | No | A dictionary, used to render templated email | ++--------------------+----------+--------------------------------------------------+ +| headers | No | A dictionary of extra headers on the message | ++--------------------+----------+--------------------------------------------------+ +| scheduled_time | No | A date/datetime object indicating when the email | +| | | should be sent | ++--------------------+----------+--------------------------------------------------+ +| priority | No | ``high``, ``medium``, ``low`` or ``now`` | +| | | (send_immediately) | ++--------------------+----------+--------------------------------------------------+ +| backend | No | Alias of the backend you want to use. | +| | | ``default`` will be used if not specified. | ++--------------------+----------+--------------------------------------------------+ +| render_on_delivery | No | Setting this to ``True`` causes email to be | +| | | lazily rendered during delivery. ``template`` | +| | | is required when ``render_on_delivery`` is True. | +| | | This way content is never stored in the DB. | +| | | May result in significant space savings. | ++--------------------+----------+--------------------------------------------------+ + + +Here are a few examples. + +If you just want to send out emails without using database templates. You can +call the ``send`` command without the ``template`` argument. + +.. code-block:: python + + from post_office import mail + + mail.send( + ['recipient1@example.com'], + 'from@example.com', + subject='Welcome!', + message='Welcome home, {{ name }}!', + html_message='Welcome home, {{ name }}!', + headers={'Reply-to': 'reply@example.com'}, + scheduled_time=date(2014, 1, 1), + context={'name': 'Alice'}, + ) + +``post_office`` is also task queue friendly. Passing ``now`` as priority into +``send_mail`` will deliver the email right away (instead of queuing it), +regardless of how many emails you have in your queue: + +.. code-block:: python + + from post_office import mail + + mail.send( + ['recipient1@example.com'], + 'from@example.com', + template='welcome_email', + context={'foo': 'bar'}, + priority='now', + ) + +This is useful if you already use something like `django-rq `_ +to send emails asynchronously and only need to store email related activities and logs. + +If you want to send an email with attachments: + +.. 
code-block:: python
+
+    from django.core.files.base import ContentFile
+    from post_office import mail
+
+    mail.send(
+        ['recipient1@example.com'],
+        'from@example.com',
+        template='welcome_email',
+        context={'foo': 'bar'},
+        priority='now',
+        attachments={
+            'attachment1.doc': '/path/to/file/file1.doc',
+            'attachment2.txt': ContentFile('file content'),
+            'attachment3.txt': {'file': ContentFile('file content'), 'mimetype': 'text/plain'},
+        }
+    )
+
+Template Tags and Variables
+---------------------------
+
+``post-office`` supports Django's template tags and variables.
+For example, if you put "Hello, {{ name }}" in the subject line and pass in
+``{'name': 'Alice'}`` as context, you will get "Hello, Alice" as subject:
+
+.. code-block:: python
+
+    from post_office.models import EmailTemplate
+    from post_office import mail
+
+    EmailTemplate.objects.create(
+        name='morning_greeting',
+        subject='Morning, {{ name|capfirst }}',
+        content='Hi {{ name }}, how are you feeling today?',
+        html_content='Hi {{ name }}, how are you feeling today?',
+    )
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template='morning_greeting',
+        context={'name': 'alice'},
+    )
+
+    # This will create an email with the following content:
+    subject = 'Morning, Alice'
+    content = 'Hi alice, how are you feeling today?'
+    html_content = 'Hi alice, how are you feeling today?'
+
+
+Multilingual Email Templates
+----------------------------
+
+You can easily create email templates in various languages.
+For example:
+
+.. code-block:: python
+
+    template = EmailTemplate.objects.create(
+        name='hello',
+        subject='Hello world!',
+    )
+
+    # Add an Indonesian version of this template:
+    indonesian_template = template.translated_templates.create(
+        language='id',
+        subject='Halo Dunia!'
+    )
+
+Sending an email using a template in a non-default language is
+just as easy:
+
+.. code-block:: python
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template=template, # Sends using the default template
+    )
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template=template,
+        language='id', # Sends using the Indonesian template
+    )
+
+Custom Email Backends
+---------------------
+
+By default, ``post_office`` uses Django's ``smtp.EmailBackend``. If you want to
+use a different backend, you can do so by configuring ``BACKENDS``.
+
+For example, if you want to use `django-ses `_::
+
+    POST_OFFICE = {
+        'BACKENDS': {
+            'default': 'smtp.EmailBackend',
+            'ses': 'django_ses.SESBackend',
+        }
+    }
+
+You can then choose which backend you want to use when sending mail:
+
+.. code-block:: python
+
+    # If you omit the `backend_alias` argument, `default` will be used
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        subject='Hello',
+    )
+
+    # If you want to send using the `ses` backend
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        subject='Hello',
+        backend='ses',
+    )
+
+
+Management Commands
+-------------------
+
+* ``send_queued_mail`` - send queued emails; those that aren't successfully
+  sent will be marked as ``failed``. Accepts the following arguments:
+
++---------------------------+--------------------------------------------------+
+| Argument                  | Description                                      |
++---------------------------+--------------------------------------------------+
+| ``--processes`` or ``-p`` | Number of parallel processes to send email.
| +| | Defaults to 1 | ++---------------------------+--------------------------------------------------+ +| ``--lockfile`` or ``-L`` | Full path to file used as lock file. Defaults to | +| | ``/tmp/post_office.lock`` | ++---------------------------+--------------------------------------------------+ + + +* ``cleanup_mail`` - delete all emails created before an X number of days + (defaults to 90). + ++---------------------------+--------------------------------------------------+ +| Argument | Description | ++---------------------------+--------------------------------------------------+ +| ``--days`` or ``-d`` | Email older than this argument will be deleted. | +| | Defaults to 90 | ++---------------------------+--------------------------------------------------+ +| ``--delete-attachments`` | Flag to delete orphaned attachment records and | +| or ``-da`` | files on disk. If flag is not set, | +| | on disk attachments files won't be deleted. | ++---------------------------+--------------------------------------------------+ + + +You may want to set these up via cron to run regularly:: + + * * * * * (cd $PROJECT; python manage.py send_queued_mail --processes=1 >> $PROJECT/cron_mail.log 2>&1) + 0 1 * * * (cd $PROJECT; python manage.py cleanup_mail --days=30 --delete-attachments >> $PROJECT/cron_mail_cleanup.log 2>&1) + +Settings +======== +This section outlines all the settings and configurations that you can put +in Django's ``settings.py`` to fine tune ``post-office``'s behavior. + +Batch Size +---------- + +If you may want to limit the number of emails sent in a batch (sometimes useful +in a low memory environment), use the ``BATCH_SIZE`` argument to limit the +number of queued emails fetched in one batch. + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'BATCH_SIZE': 50 + } + +Default Priority +---------------- + +The default priority for emails is ``medium``, but this can be altered by +setting ``DEFAULT_PRIORITY``. Integration with asynchronous email backends +(e.g. based on Celery) becomes trivial when set to ``now``. + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'DEFAULT_PRIORITY': 'now' + } + +Log Level +--------- + +The default log level is 2 (logs both successful and failed deliveries) +This behavior can be changed by setting ``LOG_LEVEL``. + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'LOG_LEVEL': 1 # Log only failed deliveries + } + +The different options are: + +* ``0`` logs nothing +* ``1`` logs only failed deliveries +* ``2`` logs everything (both successful and failed delivery attempts) + + +Sending Order +------------- + +The default sending order for emails is ``-priority``, but this can be altered by +setting ``SENDING_ORDER``. For example, if you want to send queued emails in FIFO order : + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'SENDING_ORDER': ['created'] + } + +Context Field Serializer +------------------------ + +If you need to store complex Python objects for deferred rendering +(i.e. setting ``render_on_delivery=True``), you can specify your own context +field class to store context variables. For example if you want to use +`django-picklefield `_: + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'CONTEXT_FIELD_CLASS': 'picklefield.fields.PickledObjectField' + } + +``CONTEXT_FIELD_CLASS`` defaults to ``jsonfield.JSONField``. 
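+
+For example, with deferred rendering the database stores only the template
+reference and the context, so the context has to survive a round-trip through
+``CONTEXT_FIELD_CLASS``. A minimal sketch (the template name ``welcome_email``
+is a placeholder for one created via the admin):
+
+.. code-block:: python
+
+    from post_office import mail
+
+    # With render_on_delivery=True no rendered content is stored in the DB;
+    # only the context dict below is serialized via CONTEXT_FIELD_CLASS, so
+    # stick to JSON-serializable values unless you swap in e.g.
+    # picklefield.fields.PickledObjectField as shown above.
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template='welcome_email',
+        context={'name': 'Alice', 'points': 42},
+        render_on_delivery=True,
+    )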
+ +Logging +------- + +You can configure ``post-office``'s logging from Django's ``settings.py``. For +example: + +.. code-block:: python + + LOGGING = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "post_office": { + "format": "[%(levelname)s]%(asctime)s PID %(process)d: %(message)s", + "datefmt": "%d-%m-%Y %H:%M:%S", + }, + }, + "handlers": { + "post_office": { + "level": "DEBUG", + "class": "logging.StreamHandler", + "formatter": "post_office" + }, + # If you use sentry for logging + 'sentry': { + 'level': 'ERROR', + 'class': 'raven.contrib.django.handlers.SentryHandler', + }, + }, + 'loggers': { + "post_office": { + "handlers": ["post_office", "sentry"], + "level": "INFO" + }, + }, + } + + +Threads +------- + +``post-office`` >= 3.0 allows you to use multiple threads to dramatically speed up +the speed at which emails are sent. By default, ``post-office`` uses 5 threads per process. +You can tweak this setting by changing ``THREADS_PER_PROCESS`` setting. + +This may dramatically increase the speed of bulk email delivery, depending on which email +backends you use. In my tests, multi threading speeds up email backends that use HTTP based +(REST) delivery mechanisms but doesn't seem to help SMTP based backends. + +.. code-block:: python + + # Put this in settings.py + POST_OFFICE = { + 'THREADS_PER_PROCESS': 10 + } + + +Performance +=========== + +Caching +------- + +if Django's caching mechanism is configured, ``post_office`` will cache +``EmailTemplate`` instances . If for some reason you want to disable caching, +set ``POST_OFFICE_CACHE`` to ``False`` in ``settings.py``: + +.. code-block:: python + + ## All cache key will be prefixed by post_office:template: + ## To turn OFF caching, you need to explicitly set POST_OFFICE_CACHE to False in settings + POST_OFFICE_CACHE = False + + ## Optional: to use a non default cache backend, add a "post_office" entry in CACHES + CACHES = { + 'post_office': { + 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + } + } + + +send_many() +----------- + +``send_many()`` is much more performant (generates less database queries) when +sending a large number of emails. ``send_many()`` is almost identical to ``mail.send()``, +with the exception that it accepts a list of keyword arguments that you'd +usually pass into ``mail.send()``: + +.. code-block:: python + + from post_office import mail + + first_email = { + 'sender': 'from@example.com', + 'recipients': ['alice@example.com'], + 'subject': 'Hi!', + 'message': 'Hi Alice!' + } + second_email = { + 'sender': 'from@example.com', + 'recipients': ['bob@example.com'], + 'subject': 'Hi!', + 'message': 'Hi Bob!' + } + kwargs_list = [first_email, second_email] + + mail.send_many(kwargs_list) + +Attachments are not supported with ``mail.send_many()``. + + +Running Tests +============= + +To run the test suite:: + + `which django-admin.py` test post_office --settings=post_office.test_settings --pythonpath=. + +You can run the full test suite with:: + + tox + +or:: + + python setup.py test + + +Changelog +========= + +Version 3.1.0 (2018-07-24) +-------------------------- +* Improvements to attachments are handled. Thanks @SeiryuZ! +* Added ``--delete-attachments`` flag to ``cleanup_mail`` management command. Thanks @Seiryuz! +* I18n improvements. Thanks @vsevolod-skripnik and @delneg! +* Django admin improvements. Thanks @kakulukia! + + +Version 3.0.4 +------------- +* Added compatibility with Django 2.0. 
+
+
+Running Tests
+=============
+
+To run the test suite::
+
+    `which django-admin.py` test post_office --settings=post_office.test_settings --pythonpath=.
+
+You can run the full test suite with::
+
+    tox
+
+or::
+
+    python setup.py test
+
+
+Changelog
+=========
+
+Version 3.1.0 (2018-07-24)
+--------------------------
+* Improvements to how attachments are handled. Thanks @SeiryuZ!
+* Added ``--delete-attachments`` flag to ``cleanup_mail`` management command. Thanks @SeiryuZ!
+* I18n improvements. Thanks @vsevolod-skripnik and @delneg!
+* Django admin improvements. Thanks @kakulukia!
+
+
+Version 3.0.4
+-------------
+* Added compatibility with Django 2.0. Thanks @PreActionTech and @PetrDlouhy!
+* Added natural key support to `EmailTemplate` model. Thanks @maximlomakin!
+
+
+Version 3.0.3
+-------------
+- Fixed memory leak when multiprocessing is used.
+- Fixed a possible error when adding a new email from Django admin. Thanks @ivlevdenis!
+
+
+Version 3.0.2
+-------------
+- `_send_bulk` now properly catches exceptions when preparing email messages.
+
+
+Version 3.0.1
+-------------
+- Fixed an infinite loop bug in `send_queued_mail` management command.
+
+
+Version 3.0.0
+-------------
+* `_send_bulk` now allows each process to use multiple threads to send emails.
+* Added support for mimetypes in email attachments. Thanks @clickonchris!
+* An `EmailTemplate` can now be used as a default multiple times in one language. Thanks @sac7e!
+* `send_queued_mail` management command will now check whether there are more queued emails to be sent before exiting.
+* Drop support for Django < 1.8. Thanks @fendyh!
+
+
+Full changelog can be found `here `_.
+
+
+Created and maintained by the cool guys at `Stamps `_,
+Indonesia's most elegant CRM/loyalty platform.
+
+
+.. |Build Status| image:: https://travis-ci.org/ui/django-post_office.png?branch=master
+   :target: https://travis-ci.org/ui/django-post_office
+
+.. _uWSGI: https://uwsgi-docs.readthedocs.org/en/latest/
+
+
diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/INSTALLER
similarity index 100%
rename from thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER
rename to thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/INSTALLER
diff --git a/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/METADATA
new file mode 100644
index 0000000..2bdcf2f
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/METADATA
@@ -0,0 +1,685 @@
+Metadata-Version: 2.0
+Name: django-post-office
+Version: 3.1.0
+Summary: A Django app to monitor and send mail asynchronously, complete with template support.
+Home-page: https://github.com/ui/django-post_office
+Author: Selwin Ong
+Author-email: selwin.ong@gmail.com
+License: MIT
+Description-Content-Type: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Framework :: Django
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: django (>=1.8)
+Requires-Dist: jsonfield
+Provides-Extra: test
+Requires-Dist: tox (>=2.3); extra == 'test'
+
+==================
+Django Post Office
+==================
+
+Django Post Office is a simple app to send and manage your emails in Django.
+
+Some awesome features are:
+
+* Allows you to send email asynchronously
+* Multi backend support
+* Supports HTML email
+* Supports database-based email templates
+* Built-in scheduling support
+* Works well with task queues like `RQ `_ or `Celery `_
+* Uses multiprocessing (and threading) to send a large number of emails in parallel
+* Supports multilingual email templates (i18n)
+
+
+Dependencies
+============
+
+* `django >= 1.8 `_
+* `django-jsonfield `_
+
+
+Installation
+============
+
+|Build Status|
+
+
+* Install from PyPI (or you can `manually download from PyPI `_)::
+
+    pip install django-post_office
+
+* Add ``post_office`` to your INSTALLED_APPS in Django's ``settings.py``:
+
+  .. code-block:: python
+
+    INSTALLED_APPS = (
+        # other apps
+        "post_office",
+    )
+
+* Run ``migrate``::
+
+    python manage.py migrate
+
+* Set ``post_office.EmailBackend`` as your ``EMAIL_BACKEND`` in Django's ``settings.py``:
+
+  .. code-block:: python
+
+    EMAIL_BACKEND = 'post_office.EmailBackend'
+
+
+Quickstart
+==========
+
+Sending a simple email is really easy:
+
+.. code-block:: python
+
+    from post_office import mail
+
+    mail.send(
+        'recipient@example.com', # List of email addresses also accepted
+        'from@example.com',
+        subject='My email',
+        message='Hi there!',
+        html_message='Hi there!',
+    )
+
+
+If you want to use templates, ensure that Django's admin interface is enabled. Create an
+``EmailTemplate`` instance via ``admin`` and do the following:
+
+.. code-block:: python
+
+    from post_office import mail
+
+    mail.send(
+        'recipient@example.com', # List of email addresses also accepted
+        'from@example.com',
+        template='welcome_email', # Could be an EmailTemplate instance or name
+        context={'foo': 'bar'},
+    )
+
+The above call puts your email on the queue, so you can use it in your webapp
+without slowing down the request/response cycle too much.
+To actually send the queued emails out, run ``python manage.py send_queued_mail``.
+You can schedule this management command to run regularly via cron::
+
+    * * * * * (/usr/bin/python manage.py send_queued_mail >> send_mail.log 2>&1)
+
+or, if you use uWSGI_ as an application server, add this short snippet to the
+project's ``wsgi.py`` file:
+
+.. code-block:: python
+
+    from django.core.wsgi import get_wsgi_application
+
+    application = get_wsgi_application()
+
+    # add this block of code
+    try:
+        import uwsgidecorators
+        from django.core.management import call_command
+
+        @uwsgidecorators.timer(10)
+        def send_queued_mail(num):
+            """Send queued mail every 10 seconds"""
+            call_command('send_queued_mail', processes=1)
+
+    except ImportError:
+        print("uwsgidecorators not found. Cron and timers are disabled")
+
+Alternatively, you can use the decorator ``@uwsgidecorators.cron(minute, hour, day, month, weekday)``.
+This will schedule a task at specific times. Use ``-1`` to signal any value; it corresponds
+to the ``*`` in cron.
+
+Please note that ``uwsgidecorators`` is only available if the application has been started
+with **uWSGI**. However, Django's internal ``./manage.py runserver`` also accesses this file,
+so wrap the block in an exception handler as shown above.
+
+This configuration is very useful in environments such as Docker containers, where you
+don't have a running cron daemon.
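+
+Building on the snippet above, the ``cron`` decorator can schedule the cleanup
+task from the same guarded block in ``wsgi.py``. A minimal sketch (the 30-day
+retention is just an example value):
+
+.. code-block:: python
+
+    # inside the try-block of wsgi.py shown above
+    @uwsgidecorators.cron(0, 1, -1, -1, -1)  # minute 0, hour 1, any day/month/weekday
+    def cleanup_mail(signum):
+        """Delete emails older than 30 days every night at 01:00"""
+        call_command('cleanup_mail', days=30)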
+
+
+Usage
+=====
+
+mail.send()
+-----------
+
+``mail.send`` is the most important function in this library; it takes these
+arguments:
+
++--------------------+----------+--------------------------------------------------+
+| Argument           | Required | Description                                      |
++--------------------+----------+--------------------------------------------------+
+| recipients         | Yes      | list of recipient email addresses                |
++--------------------+----------+--------------------------------------------------+
+| sender             | No       | Defaults to ``settings.DEFAULT_FROM_EMAIL``,     |
+|                    |          | display name is allowed (``John ``)              |
++--------------------+----------+--------------------------------------------------+
+| subject            | No       | Email subject (if ``template`` is not specified) |
++--------------------+----------+--------------------------------------------------+
+| message            | No       | Email content (if ``template`` is not specified) |
++--------------------+----------+--------------------------------------------------+
+| html_message       | No       | HTML content (if ``template`` is not specified)  |
++--------------------+----------+--------------------------------------------------+
+| template           | No       | ``EmailTemplate`` instance or name               |
++--------------------+----------+--------------------------------------------------+
+| language           | No       | Language in which you want to send the email     |
+|                    |          | (if you have multilingual email templates)       |
++--------------------+----------+--------------------------------------------------+
+| cc                 | No       | list of emails, will appear in ``cc`` field      |
++--------------------+----------+--------------------------------------------------+
+| bcc                | No       | list of emails, will appear in ``bcc`` field     |
++--------------------+----------+--------------------------------------------------+
+| attachments        | No       | Email attachments - A dictionary where the keys  |
+|                    |          | are the filenames and the values are either:     |
+|                    |          |                                                  |
+|                    |          | * files                                          |
+|                    |          | * file-like objects                              |
+|                    |          | * full path of the file                          |
++--------------------+----------+--------------------------------------------------+
+| context            | No       | A dictionary, used to render templated email     |
++--------------------+----------+--------------------------------------------------+
+| headers            | No       | A dictionary of extra headers on the message     |
++--------------------+----------+--------------------------------------------------+
+| scheduled_time     | No       | A date/datetime object indicating when the email |
+|                    |          | should be sent                                   |
++--------------------+----------+--------------------------------------------------+
+| priority           | No       | ``high``, ``medium``, ``low`` or ``now``         |
+|                    |          | (sent immediately)                               |
++--------------------+----------+--------------------------------------------------+
+| backend            | No       | Alias of the backend you want to use.            |
+|                    |          | ``default`` will be used if not specified.       |
++--------------------+----------+--------------------------------------------------+
+| render_on_delivery | No       | Setting this to ``True`` causes the email to be  |
+|                    |          | lazily rendered during delivery. ``template``    |
+|                    |          | is required when ``render_on_delivery`` is True. |
+|                    |          | This way content is never stored in the DB.      |
+|                    |          | May result in significant space savings.         |
++--------------------+----------+--------------------------------------------------+
+
+
+Here are a few examples.
+
+If you just want to send out emails without using database templates, you can
+call ``send`` without the ``template`` argument.
+
+.. code-block:: python
+
+    from datetime import date
+
+    from post_office import mail
+
+    mail.send(
+        ['recipient1@example.com'],
+        'from@example.com',
+        subject='Welcome!',
+        message='Welcome home, {{ name }}!',
+        html_message='Welcome home, {{ name }}!',
+        headers={'Reply-to': 'reply@example.com'},
+        scheduled_time=date(2014, 1, 1),
+        context={'name': 'Alice'},
+    )
+
+``post_office`` is also task queue friendly. Passing ``now`` as priority into
+``send_mail`` will deliver the email right away (instead of queuing it),
+regardless of how many emails you have in your queue:
+
+.. code-block:: python
+
+    from post_office import mail
+
+    mail.send(
+        ['recipient1@example.com'],
+        'from@example.com',
+        template='welcome_email',
+        context={'foo': 'bar'},
+        priority='now',
+    )
+
+This is useful if you already use something like `django-rq `_
+to send emails asynchronously and only need to store email related activities and logs.
+
+If you want to send an email with attachments:
+
+.. code-block:: python
+
+    from django.core.files.base import ContentFile
+    from post_office import mail
+
+    mail.send(
+        ['recipient1@example.com'],
+        'from@example.com',
+        template='welcome_email',
+        context={'foo': 'bar'},
+        priority='now',
+        attachments={
+            'attachment1.doc': '/path/to/file/file1.doc',
+            'attachment2.txt': ContentFile('file content'),
+            'attachment3.txt': {'file': ContentFile('file content'), 'mimetype': 'text/plain'},
+        }
+    )
+
+Template Tags and Variables
+---------------------------
+
+``post-office`` supports Django's template tags and variables.
+For example, if you put "Hello, {{ name }}" in the subject line and pass in
+``{'name': 'Alice'}`` as context, you will get "Hello, Alice" as subject:
+
+.. code-block:: python
+
+    from post_office.models import EmailTemplate
+    from post_office import mail
+
+    EmailTemplate.objects.create(
+        name='morning_greeting',
+        subject='Morning, {{ name|capfirst }}',
+        content='Hi {{ name }}, how are you feeling today?',
+        html_content='Hi {{ name }}, how are you feeling today?',
+    )
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template='morning_greeting',
+        context={'name': 'alice'},
+    )
+
+    # This will create an email with the following content:
+    subject = 'Morning, Alice',
+    content = 'Hi alice, how are you feeling today?'
+    html_content = 'Hi alice, how are you feeling today?'
+
+
+Multilingual Email Templates
+----------------------------
+
+You can easily create email templates in various languages.
+For example:
+
+.. code-block:: python
+
+    template = EmailTemplate.objects.create(
+        name='hello',
+        subject='Hello world!',
+    )
+
+    # Add an Indonesian version of this template:
+    indonesian_template = template.translated_templates.create(
+        language='id',
+        subject='Halo Dunia!'
+    )
+
+Sending an email using a template in a non-default language is
+just as easy:
+
+.. code-block:: python
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template=template, # Sends using the default template
+    )
+
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        template=template,
+        language='id', # Sends using Indonesian template
+    )
+
+Custom Email Backends
+---------------------
+
+By default, ``post_office`` uses Django's ``smtp.EmailBackend``. If you want to
+use a different backend, you can do so by configuring ``BACKENDS``.
+
+For example, if you want to use `django-ses `_::
+
+    POST_OFFICE = {
+        'BACKENDS': {
+            'default': 'smtp.EmailBackend',
+            'ses': 'django_ses.SESBackend',
+        }
+    }
+
+You can then choose which backend you want to use when sending mail:
+
+.. code-block:: python
+
+    # If you omit the `backend` argument, `default` will be used
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        subject='Hello',
+    )
+
+    # If you want to send using the `ses` backend
+    mail.send(
+        ['recipient@example.com'],
+        'from@example.com',
+        subject='Hello',
+        backend='ses',
+    )
+
+
+Management Commands
+-------------------
+
+* ``send_queued_mail`` - send queued emails; those that aren't successfully sent
+  will be marked as ``failed``. Accepts the following arguments:
+
++---------------------------+--------------------------------------------------+
+| Argument                  | Description                                      |
++---------------------------+--------------------------------------------------+
+| ``--processes`` or ``-p`` | Number of parallel processes to send email.      |
+|                           | Defaults to 1                                    |
++---------------------------+--------------------------------------------------+
+| ``--lockfile`` or ``-L``  | Full path to file used as lock file. Defaults to |
+|                           | ``/tmp/post_office.lock``                        |
++---------------------------+--------------------------------------------------+
+
+
+* ``cleanup_mail`` - delete all emails created more than X days ago
+  (defaults to 90).
+
++---------------------------+--------------------------------------------------+
+| Argument                  | Description                                      |
++---------------------------+--------------------------------------------------+
+| ``--days`` or ``-d``      | Emails older than this will be deleted.          |
+|                           | Defaults to 90                                   |
++---------------------------+--------------------------------------------------+
+| ``--delete-attachments``  | Flag to delete orphaned attachment records and   |
+| or ``-da``                | files on disk. If the flag is not set,           |
+|                           | attachment files on disk won't be deleted.       |
++---------------------------+--------------------------------------------------+
+
+
+You may want to set these up via cron to run regularly::
+
+    * * * * * (cd $PROJECT; python manage.py send_queued_mail --processes=1 >> $PROJECT/cron_mail.log 2>&1)
+    0 1 * * * (cd $PROJECT; python manage.py cleanup_mail --days=30 --delete-attachments >> $PROJECT/cron_mail_cleanup.log 2>&1)
+
+Settings
+========
+This section outlines all the settings and configurations that you can put
+in Django's ``settings.py`` to fine-tune ``post-office``'s behavior.
+
+Batch Size
+----------
+
+If you want to limit the number of emails sent in a batch (sometimes useful
+in a low-memory environment), use the ``BATCH_SIZE`` setting to limit the
+number of queued emails fetched in one batch.
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'BATCH_SIZE': 50
+    }
+
+Default Priority
+----------------
+
+The default priority for emails is ``medium``, but this can be altered by
+setting ``DEFAULT_PRIORITY``. Integration with asynchronous email backends
+(e.g. based on Celery) becomes trivial when set to ``now``.
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'DEFAULT_PRIORITY': 'now'
+    }
+
+Log Level
+---------
+
+The default log level is 2 (logs both successful and failed deliveries).
+This behavior can be changed by setting ``LOG_LEVEL``.
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'LOG_LEVEL': 1 # Log only failed deliveries
+    }
+
+The different options are:
+
+* ``0`` logs nothing
+* ``1`` logs only failed deliveries
+* ``2`` logs everything (both successful and failed delivery attempts)
+
+
+Sending Order
+-------------
+
+The default sending order for emails is ``-priority``, but this can be altered by
+setting ``SENDING_ORDER``. For example, if you want to send queued emails in FIFO order:
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'SENDING_ORDER': ['created']
+    }
+
+Context Field Serializer
+------------------------
+
+If you need to store complex Python objects for deferred rendering
+(i.e. setting ``render_on_delivery=True``), you can specify your own context
+field class to store context variables. For example, if you want to use
+`django-picklefield `_:
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'CONTEXT_FIELD_CLASS': 'picklefield.fields.PickledObjectField'
+    }
+
+``CONTEXT_FIELD_CLASS`` defaults to ``jsonfield.JSONField``.
+
+Logging
+-------
+
+You can configure ``post-office``'s logging from Django's ``settings.py``. For
+example:
+
+.. code-block:: python
+
+    LOGGING = {
+        "version": 1,
+        "disable_existing_loggers": False,
+        "formatters": {
+            "post_office": {
+                "format": "[%(levelname)s]%(asctime)s PID %(process)d: %(message)s",
+                "datefmt": "%d-%m-%Y %H:%M:%S",
+            },
+        },
+        "handlers": {
+            "post_office": {
+                "level": "DEBUG",
+                "class": "logging.StreamHandler",
+                "formatter": "post_office"
+            },
+            # If you use sentry for logging
+            'sentry': {
+                'level': 'ERROR',
+                'class': 'raven.contrib.django.handlers.SentryHandler',
+            },
+        },
+        'loggers': {
+            "post_office": {
+                "handlers": ["post_office", "sentry"],
+                "level": "INFO"
+            },
+        },
+    }
+
+
+Threads
+-------
+
+``post-office`` >= 3.0 allows you to use multiple threads to dramatically increase
+the speed at which emails are sent. By default, ``post-office`` uses 5 threads per
+process. You can tweak this by changing the ``THREADS_PER_PROCESS`` setting.
+
+This may dramatically increase the speed of bulk email delivery, depending on which
+email backends you use. In my tests, multithreading speeds up email backends that use
+HTTP-based (REST) delivery mechanisms but doesn't seem to help SMTP-based backends.
+
+.. code-block:: python
+
+    # Put this in settings.py
+    POST_OFFICE = {
+        'THREADS_PER_PROCESS': 10
+    }
+
+
+Performance
+===========
+
+Caching
+-------
+
+If Django's caching mechanism is configured, ``post_office`` will cache
+``EmailTemplate`` instances. If for some reason you want to disable caching,
+set ``POST_OFFICE_CACHE`` to ``False`` in ``settings.py``:
+
+.. code-block:: python
+
+    ## All cache keys will be prefixed by post_office:template:
+    ## To turn OFF caching, you need to explicitly set POST_OFFICE_CACHE to False in settings
+    POST_OFFICE_CACHE = False
+
+    ## Optional: to use a non-default cache backend, add a "post_office" entry in CACHES
+    CACHES = {
+        'post_office': {
+            'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
+            'LOCATION': '127.0.0.1:11211',
+        }
+    }
+
+
+send_many()
+-----------
+
+``send_many()`` is much more performant (issues fewer database queries) when
+sending a large number of emails. ``send_many()`` is almost identical to ``mail.send()``,
+with the exception that it accepts a list of the keyword arguments that you'd
+usually pass into ``mail.send()``:
+
+.. code-block:: python
+
+    from post_office import mail
+
+    first_email = {
+        'sender': 'from@example.com',
+        'recipients': ['alice@example.com'],
+        'subject': 'Hi!',
+        'message': 'Hi Alice!'
+    }
+    second_email = {
+        'sender': 'from@example.com',
+        'recipients': ['bob@example.com'],
+        'subject': 'Hi!',
+        'message': 'Hi Bob!'
+    }
+    kwargs_list = [first_email, second_email]
+
+    mail.send_many(kwargs_list)
+
+Attachments are not supported with ``mail.send_many()``.
+
+
+Running Tests
+=============
+
+To run the test suite::
+
+    `which django-admin.py` test post_office --settings=post_office.test_settings --pythonpath=.
+
+You can run the full test suite with::
+
+    tox
+
+or::
+
+    python setup.py test
+
+
+Changelog
+=========
+
+Version 3.1.0 (2018-07-24)
+--------------------------
+* Improvements to how attachments are handled. Thanks @SeiryuZ!
+* Added ``--delete-attachments`` flag to ``cleanup_mail`` management command. Thanks @SeiryuZ!
+* I18n improvements. Thanks @vsevolod-skripnik and @delneg!
+* Django admin improvements. Thanks @kakulukia!
+
+
+Version 3.0.4
+-------------
+* Added compatibility with Django 2.0. Thanks @PreActionTech and @PetrDlouhy!
+* Added natural key support to `EmailTemplate` model. Thanks @maximlomakin!
+
+
+Version 3.0.3
+-------------
+- Fixed memory leak when multiprocessing is used.
+- Fixed a possible error when adding a new email from Django admin. Thanks @ivlevdenis!
+
+
+Version 3.0.2
+-------------
+- `_send_bulk` now properly catches exceptions when preparing email messages.
+
+
+Version 3.0.1
+-------------
+- Fixed an infinite loop bug in `send_queued_mail` management command.
+
+
+Version 3.0.0
+-------------
+* `_send_bulk` now allows each process to use multiple threads to send emails.
+* Added support for mimetypes in email attachments. Thanks @clickonchris!
+* An `EmailTemplate` can now be used as a default multiple times in one language. Thanks @sac7e!
+* `send_queued_mail` management command will now check whether there are more queued emails to be sent before exiting.
+* Drop support for Django < 1.8. Thanks @fendyh!
+
+
+Full changelog can be found `here `_.
+
+
+Created and maintained by the cool guys at `Stamps `_,
+Indonesia's most elegant CRM/loyalty platform.
+
+
+.. |Build Status| image:: https://travis-ci.org/ui/django-post_office.png?branch=master
+   :target: https://travis-ci.org/ui/django-post_office
+
+..
_uWSGI: https://uwsgi-docs.readthedocs.org/en/latest/ + + diff --git a/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/RECORD new file mode 100644 index 0000000..b22f417 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/RECORD @@ -0,0 +1,95 @@ +django_post_office-3.1.0.dist-info/DESCRIPTION.rst,sha256=fq6f9SdPAs7jJl7BWe_S9ko0vLKU4Yarl_f1aTu68_Q,22039 +django_post_office-3.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +django_post_office-3.1.0.dist-info/METADATA,sha256=ax9BP4Si7dNXTUuIQ5bMgtr0u0JvkEQjvDKdP8qXavY,23265 +django_post_office-3.1.0.dist-info/RECORD,, +django_post_office-3.1.0.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 +django_post_office-3.1.0.dist-info/metadata.json,sha256=EM2Mho7BIVj-ZWfq4Cl6uwNx6as_NqbCf1MMU0qaxLU,1353 +django_post_office-3.1.0.dist-info/top_level.txt,sha256=UM3cswoGuzxOuDS20Qu9QVSXmMCn5t3ZZniWAtZ6Dn4,12 +post_office/__init__.py,sha256=eVpmOKhL7n_V2TQ2upYQRcfmVbrKEFFYsM71Hv3F738,114 +post_office/__pycache__/__init__.cpython-36.pyc,, +post_office/__pycache__/admin.cpython-36.pyc,, +post_office/__pycache__/apps.cpython-36.pyc,, +post_office/__pycache__/backends.cpython-36.pyc,, +post_office/__pycache__/cache.cpython-36.pyc,, +post_office/__pycache__/compat.cpython-36.pyc,, +post_office/__pycache__/connections.cpython-36.pyc,, +post_office/__pycache__/fields.cpython-36.pyc,, +post_office/__pycache__/lockfile.cpython-36.pyc,, +post_office/__pycache__/logutils.cpython-36.pyc,, +post_office/__pycache__/mail.cpython-36.pyc,, +post_office/__pycache__/models.cpython-36.pyc,, +post_office/__pycache__/settings.cpython-36.pyc,, +post_office/__pycache__/test_settings.cpython-36.pyc,, +post_office/__pycache__/test_urls.cpython-36.pyc,, +post_office/__pycache__/utils.cpython-36.pyc,, +post_office/__pycache__/validators.cpython-36.pyc,, +post_office/__pycache__/views.cpython-36.pyc,, +post_office/admin.py,sha256=wQqT_r9mBJ3E2t_aKSF2T8WHxgly6a4UmcyF3eyDA7Q,4922 +post_office/apps.py,sha256=dv8AJIBvIec64Xy2BFKKd1R-8eEbLT0XBBiHXWQzS5Q,188 +post_office/backends.py,sha256=5s2UTtFz2DIdaJMtzJV06bm4pP6IsaSrx1eHt3qK6yY,1855 +post_office/cache.py,sha256=O39Foxqg8gxn2TBaTiT6wlyTKFE7Y6F-rSPLrB6ESkk,646 +post_office/compat.py,sha256=5adNRlBcuvNqwDBjIDnbX2wVabGnj3fQTRr9e5PT-0I,1076 +post_office/connections.py,sha256=REO_ns9KT42rhBFX4b8ABzBpkGiR8uO429j4DSOhKeU,1145 +post_office/fields.py,sha256=9MgoIuXNm6mqrAuTqzMxfd4JiOSFx7DXWILXWnOU7ds,1973 +post_office/locale/de/LC_MESSAGES/django.mo,sha256=JGLLjuEXSDqKzq0RnBjnIZX23aV0988oHNGCgD9J3sk,1632 +post_office/locale/de/LC_MESSAGES/django.po,sha256=WBTJcJK_0SGC6O4nbE2DMaCLdrmQRHyE1RRRUbedmQE,2395 +post_office/locale/it/LC_MESSAGES/django.mo,sha256=6hqUjb2Y3T21QnOya5bgY-gkMi4QhJxEoKv6QSAF9Mg,1611 +post_office/locale/it/LC_MESSAGES/django.po,sha256=h-hoj6fTJwcgVB9vf9UKsEvzjWJ2nmYFoDj5JlYQro0,2379 +post_office/locale/pl/LC_MESSAGES/django.mo,sha256=k_bOkQR787kAqHKUGxcRco5Mkggl2Hwr_bR3d05q6Ug,2698 +post_office/locale/pl/LC_MESSAGES/django.po,sha256=tDDtQeMufPFoSYSgILfvgCSzqnpRG6V_Sngbwwhsv_4,4300 +post_office/locale/ru_RU/LC_MESSAGES/django.mo,sha256=Op7j2z0Hii-uT6WVa7aqen60Rahz1RNXyp7GoHPnQ_E,3274 +post_office/locale/ru_RU/LC_MESSAGES/django.po,sha256=UIsDRCnv0tc11sjFSUgJjOLcXGr-lHgIpp2ZOcAhrls,4901 +post_office/lockfile.py,sha256=RS3c_b5jWi1z1pGH-7TsT8gwN5ueDdtHSpiIkbE64l4,4594 
+post_office/logutils.py,sha256=gTa5EeuZu3UHiyR-4XvGVl-LRJ1vM8lHoLW274wTDNs,1066 +post_office/mail.py,sha256=O8kZsccs30rKpDAreIM6mC6YfdI72Mu0gv_Fb-ks8DY,10135 +post_office/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +post_office/management/__pycache__/__init__.cpython-36.pyc,, +post_office/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +post_office/management/commands/__pycache__/__init__.cpython-36.pyc,, +post_office/management/commands/__pycache__/cleanup_mail.cpython-36.pyc,, +post_office/management/commands/__pycache__/send_queued_mail.cpython-36.pyc,, +post_office/management/commands/cleanup_mail.py,sha256=LKO3SWjbIMzlXsbUDychhkX3Lt9AO8eZdtAdYo0Nin0,1412 +post_office/management/commands/send_queued_mail.py,sha256=ALkDylHVflNLydBTI96iBl6xXJtCW-W1ziG4BFhOisA,1999 +post_office/migrations/0001_initial.py,sha256=7TaB2WEljVeM1niR6N_cYWz4PwXT5mxirHRlfwIdJeg,4635 +post_office/migrations/0002_add_i18n_and_backend_alias.py,sha256=BtWyGrAybC5n7VhUup3GiREZJk6iMgxmSgMAy0IzmeE,5489 +post_office/migrations/0003_longer_subject.py,sha256=ZnXopRJEeY_gfvj01t5lthde98HJZa91W3GrmyXp3lg,2749 +post_office/migrations/0004_auto_20160607_0901.py,sha256=37BgmxLeGYL0RzYI-5flsCKs3obgqzNQmWEVZyitTLQ,5025 +post_office/migrations/0005_auto_20170515_0013.py,sha256=EuA_SPdWF1e-TVBTzrO-z1sFZbiJaPV1ClLhw_GPlvc,456 +post_office/migrations/0006_attachment_mimetype.py,sha256=UBDnxzP1KWCaEy1nBYQW6Zuv2EB-36w82jBxVAlkhgY,435 +post_office/migrations/0007_auto_20170731_1342.py,sha256=LK8zW5vrxdjG07w5NMr3kisgdx-aBu1PEu2H-LPaNgM,498 +post_office/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +post_office/migrations/__pycache__/0001_initial.cpython-36.pyc,, +post_office/migrations/__pycache__/0002_add_i18n_and_backend_alias.cpython-36.pyc,, +post_office/migrations/__pycache__/0003_longer_subject.cpython-36.pyc,, +post_office/migrations/__pycache__/0004_auto_20160607_0901.cpython-36.pyc,, +post_office/migrations/__pycache__/0005_auto_20170515_0013.cpython-36.pyc,, +post_office/migrations/__pycache__/0006_attachment_mimetype.cpython-36.pyc,, +post_office/migrations/__pycache__/0007_auto_20170731_1342.cpython-36.pyc,, +post_office/migrations/__pycache__/__init__.cpython-36.pyc,, +post_office/models.py,sha256=1WOu1px_vWwJzRAtmfoxfIRRx5jPOhjTS2cwltZ6GPM,10942 +post_office/settings.py,sha256=PlKALPJvOo9AK326JjSMqI6U5-aO-aZSHqn8cSZUjaI,2584 +post_office/test_settings.py,sha256=DkeMMDFiF6s2PCW0FoHv9pXJ8Lu5M5Vo25hdzKV-y3I,2293 +post_office/test_urls.py,sha256=jFmkObKRb7-bE2nWqfQ49eSC51F3ayWRCrtymaEr378,123 +post_office/tests/__init__.py,sha256=r8KNPnxhYYPrY_1Ko9SSo7Y1DcHlrZL2h6dRtqDePXk,287 +post_office/tests/__pycache__/__init__.cpython-36.pyc,, +post_office/tests/__pycache__/test_backends.cpython-36.pyc,, +post_office/tests/__pycache__/test_cache.cpython-36.pyc,, +post_office/tests/__pycache__/test_commands.cpython-36.pyc,, +post_office/tests/__pycache__/test_connections.cpython-36.pyc,, +post_office/tests/__pycache__/test_lockfile.cpython-36.pyc,, +post_office/tests/__pycache__/test_mail.cpython-36.pyc,, +post_office/tests/__pycache__/test_models.cpython-36.pyc,, +post_office/tests/__pycache__/test_utils.cpython-36.pyc,, +post_office/tests/__pycache__/test_views.cpython-36.pyc,, +post_office/tests/test_backends.py,sha256=ZCP30v7ZO_sqsW-HHmIqQYWYE7DsQCQZGG7mxXaZhPc,4636 +post_office/tests/test_cache.py,sha256=qKxuSytd8md9JgOFn_R1C0wLEt4ta_akN0c7A6Mwa9E,1437 
+post_office/tests/test_commands.py,sha256=NXijHn86wXaVrE1hcxiKvJKx-Tn4Ce6YmRPNqyt5U6A,6059 +post_office/tests/test_connections.py,sha256=QL_EIy_Pjdv4dCNtLLEF_h2Vso3DrObcG1C4Ds06AIw,459 +post_office/tests/test_lockfile.py,sha256=MMLgRhwV5xW72DlWR62yM_KMqQCutCfbDx8iWNs_Sas,1943 +post_office/tests/test_mail.py,sha256=fJydVnGF5GRojLg2Oq2l_f_RyJQjVlmxMo5iFXUIREo,16449 +post_office/tests/test_models.py,sha256=_1gtj0yL6ur1MQ2o_iq567TXAib1oReD1LB_LilNSvo,14960 +post_office/tests/test_utils.py,sha256=_AqeqTZZmsGDQHosVeESOvzMCQ2uD_R0hbMCJrxDIso,8476 +post_office/tests/test_views.py,sha256=feYG3bTnQDVRYDKCIsKX_UJerKEaegZXGze20NHrT_Y,1208 +post_office/utils.py,sha256=aC8oilDFjQKBtNgZc__GhVX_y22cQrFPU5oKZ-CX-sc,4556 +post_office/validators.py,sha256=q8umHXtqM4F93vh0g6m7x-U2eM347w9L6qVXXelF-v4,1409 +post_office/views.py,sha256=F42JXgnqFqK0fajXeutyJJxwOszRxoLMNkIhfc4Z7KI,26 diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/WHEEL similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL rename to thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/WHEEL diff --git a/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/metadata.json new file mode 100644 index 0000000..6cf5154 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "selwin.ong@gmail.com", "name": "Selwin Ong", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/ui/django-post_office"}}}, "extras": ["test"], "generator": "bdist_wheel (0.30.0)", "license": "MIT", "metadata_version": "2.0", "name": "django-post-office", "run_requires": [{"requires": ["django (>=1.8)", "jsonfield"]}, {"extra": "test", "requires": ["tox (>=2.3)"]}], "summary": "A Django app to monitor and send mail asynchronously, complete with template support.", "test_requires": [{"requires": ["tox (>=2.3)"]}], "version": "3.1.0"} \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/top_level.txt new file mode 100644 index 0000000..ba90aa3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_post_office-3.1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +post_office diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/DESCRIPTION.rst new file mode 100644 index 
0000000..caa9f30 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/DESCRIPTION.rst @@ -0,0 +1,121 @@ +django-jsonfield +---------------- + +django-jsonfield is a reusable Django field that allows you to store validated JSON in your model. + +It silently takes care of serialization. To use, simply add the field to one of your models. + +Python 3 & Django 1.8 through 1.11 supported! + +**Use PostgreSQL?** 1.0.0 introduced a breaking change to the underlying data type, so if you were using < 1.0.0 please read https://github.com/dmkoch/django-jsonfield/issues/57 before upgrading. Also, consider switching to Django's native JSONField that was added in Django 1.9. + +**Note:** There are a couple of third-party add-on JSONFields for Django. This project is django-jsonfield here on GitHub but is named `jsonfield on PyPI`_. There is another `django-jsonfield on Bitbucket`_, but that one is `django-jsonfield on PyPI`_. I realize this naming conflict is confusing and I am open to merging the two projects. + +.. _jsonfield on PyPI: https://pypi.python.org/pypi/jsonfield +.. _django-jsonfield on Bitbucket: https://bitbucket.org/schinckel/django-jsonfield +.. _django-jsonfield on PyPI: https://pypi.python.org/pypi/django-jsonfield + +**Note:** Django 1.9 added native PostgreSQL JSON support in `django.contrib.postgres.fields.JSONField`_. This module is still useful if you need to support JSON in databases other than PostgreSQL or are creating a third-party module that needs to be database-agnostic. But if you're an end user using PostgreSQL and want full-featured JSON support, I recommend using the built-in JSONField from Django instead of this module. + +.. _django.contrib.postgres.fields.JSONField: https://docs.djangoproject.com/en/dev/ref/contrib/postgres/fields/#jsonfield + +**Note:** Semver is followed after the 1.0 release. + + +Installation +------------ + +.. code-block:: python + + pip install jsonfield + + +Usage +----- + +.. code-block:: python + + from django.db import models + from jsonfield import JSONField + + class MyModel(models.Model): + json = JSONField() + +Advanced Usage +-------------- + +By default python deserializes json into dict objects. This behavior differs from the standard json behavior because python dicts do not have ordered keys. + +To overcome this limitation and keep the sort order of OrderedDict keys the deserialisation can be adjusted on model initialisation: + +.. code-block:: python + + import collections + class MyModel(models.Model): + json = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict}) + + +Other Fields +------------ + +**jsonfield.JSONCharField** + +If you need to use your JSON field in an index or other constraint, you can use **JSONCharField** which subclasses **CharField** instead of **TextField**. You'll also need to specify a **max_length** parameter if you use this field. + + +Compatibility +-------------- + +django-jsonfield aims to support the same versions of Django currently maintained by the main Django project. See `Django supported versions`_, currently: + + * Django 1.8 (LTS) with Python 2.7, 3.3, 3.4, or 3.5 + * Django 1.9 with Python 2.7, 3.4, or 3.5 + * Django 1.10 with Python 2.7, 3.4, or 3.5 + * Django 1.11 (LTS) with Python 2.7, 3.4, 3.5 or 3.6 + +.. _Django supported versions: https://www.djangoproject.com/download/#supported-versions + + +Testing django-jsonfield Locally +-------------------------------- + +To test against all supported versions of Django: + +.. 
code-block:: shell + + $ docker-compose build && docker-compose up + +Or just one version (for example Django 1.10 on Python 3.5): + +.. code-block:: shell + + $ docker-compose build && docker-compose run tox tox -e py35-1.10 + + +Travis CI +--------- + +.. image:: https://travis-ci.org/dmkoch/django-jsonfield.svg?branch=master + :target: https://travis-ci.org/dmkoch/django-jsonfield + +Contact +------- +Web: http://bradjasper.com + +Twitter: `@bradjasper`_ + +Email: `contact@bradjasper.com`_ + + + +.. _contact@bradjasper.com: mailto:contact@bradjasper.com +.. _@bradjasper: https://twitter.com/bradjasper + +Changes +------- + +Take a look at the `changelog`_. + +.. _changelog: https://github.com/dmkoch/django-jsonfield/blob/master/CHANGES.rst + + diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/METADATA new file mode 100644 index 0000000..2407db7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/METADATA @@ -0,0 +1,143 @@ +Metadata-Version: 2.0 +Name: jsonfield +Version: 2.0.2 +Summary: A reusable Django field that allows you to store validated JSON in your model. +Home-page: https://github.com/dmkoch/django-jsonfield/ +Author: Dan Koch +Author-email: dmkoch@gmail.com +License: MIT +Platform: UNKNOWN +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Framework :: Django +Requires-Dist: Django (>=1.8.0) + +django-jsonfield +---------------- + +django-jsonfield is a reusable Django field that allows you to store validated JSON in your model. + +It silently takes care of serialization. To use, simply add the field to one of your models. + +Python 3 & Django 1.8 through 1.11 supported! + +**Use PostgreSQL?** 1.0.0 introduced a breaking change to the underlying data type, so if you were using < 1.0.0 please read https://github.com/dmkoch/django-jsonfield/issues/57 before upgrading. Also, consider switching to Django's native JSONField that was added in Django 1.9. + +**Note:** There are a couple of third-party add-on JSONFields for Django. This project is django-jsonfield here on GitHub but is named `jsonfield on PyPI`_. There is another `django-jsonfield on Bitbucket`_, but that one is `django-jsonfield on PyPI`_. I realize this naming conflict is confusing and I am open to merging the two projects. + +.. _jsonfield on PyPI: https://pypi.python.org/pypi/jsonfield +.. _django-jsonfield on Bitbucket: https://bitbucket.org/schinckel/django-jsonfield +.. _django-jsonfield on PyPI: https://pypi.python.org/pypi/django-jsonfield + +**Note:** Django 1.9 added native PostgreSQL JSON support in `django.contrib.postgres.fields.JSONField`_. 
This module is still useful if you need to support JSON in databases other than PostgreSQL or are creating a third-party module that needs to be database-agnostic. But if you're an end user using PostgreSQL and want full-featured JSON support, I recommend using the built-in JSONField from Django instead of this module. + +.. _django.contrib.postgres.fields.JSONField: https://docs.djangoproject.com/en/dev/ref/contrib/postgres/fields/#jsonfield + +**Note:** Semver is followed after the 1.0 release. + + +Installation +------------ + +.. code-block:: python + + pip install jsonfield + + +Usage +----- + +.. code-block:: python + + from django.db import models + from jsonfield import JSONField + + class MyModel(models.Model): + json = JSONField() + +Advanced Usage +-------------- + +By default python deserializes json into dict objects. This behavior differs from the standard json behavior because python dicts do not have ordered keys. + +To overcome this limitation and keep the sort order of OrderedDict keys the deserialisation can be adjusted on model initialisation: + +.. code-block:: python + + import collections + class MyModel(models.Model): + json = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict}) + + +Other Fields +------------ + +**jsonfield.JSONCharField** + +If you need to use your JSON field in an index or other constraint, you can use **JSONCharField** which subclasses **CharField** instead of **TextField**. You'll also need to specify a **max_length** parameter if you use this field. + + +Compatibility +-------------- + +django-jsonfield aims to support the same versions of Django currently maintained by the main Django project. See `Django supported versions`_, currently: + + * Django 1.8 (LTS) with Python 2.7, 3.3, 3.4, or 3.5 + * Django 1.9 with Python 2.7, 3.4, or 3.5 + * Django 1.10 with Python 2.7, 3.4, or 3.5 + * Django 1.11 (LTS) with Python 2.7, 3.4, 3.5 or 3.6 + +.. _Django supported versions: https://www.djangoproject.com/download/#supported-versions + + +Testing django-jsonfield Locally +-------------------------------- + +To test against all supported versions of Django: + +.. code-block:: shell + + $ docker-compose build && docker-compose up + +Or just one version (for example Django 1.10 on Python 3.5): + +.. code-block:: shell + + $ docker-compose build && docker-compose run tox tox -e py35-1.10 + + +Travis CI +--------- + +.. image:: https://travis-ci.org/dmkoch/django-jsonfield.svg?branch=master + :target: https://travis-ci.org/dmkoch/django-jsonfield + +Contact +------- +Web: http://bradjasper.com + +Twitter: `@bradjasper`_ + +Email: `contact@bradjasper.com`_ + + + +.. _contact@bradjasper.com: mailto:contact@bradjasper.com +.. _@bradjasper: https://twitter.com/bradjasper + +Changes +------- + +Take a look at the `changelog`_. + +.. 
_changelog: https://github.com/dmkoch/django-jsonfield/blob/master/CHANGES.rst + + diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/RECORD new file mode 100644 index 0000000..aff8e2d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/RECORD @@ -0,0 +1,19 @@ +jsonfield-2.0.2.dist-info/DESCRIPTION.rst,sha256=ol_8lnYqTVXq5ExDwAmbBhSUylv5skifu1MSqZJUszU,4077 +jsonfield-2.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonfield-2.0.2.dist-info/METADATA,sha256=fILysyClwPP4RqmlbgBEyjyhmBkYfqG_laVx_21m19M,4892 +jsonfield-2.0.2.dist-info/RECORD,, +jsonfield-2.0.2.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +jsonfield-2.0.2.dist-info/metadata.json,sha256=EkZdQOU_zbNU_Uf6f6l2RPH3n9HoVNU0X8V0DbM5LIY,1010 +jsonfield-2.0.2.dist-info/top_level.txt,sha256=vKhrOliM1tJJBXUhSXSoHcWs_90pUa7ogHvn-mzxGKQ,10 +jsonfield/__init__.py,sha256=JzGSlVByVSYtIsp9iNf1S8pgxFdZ3ZY6y9Uys8hk2hs,53 +jsonfield/__pycache__/__init__.cpython-36.pyc,, +jsonfield/__pycache__/encoder.cpython-36.pyc,, +jsonfield/__pycache__/fields.cpython-36.pyc,, +jsonfield/__pycache__/models.cpython-36.pyc,, +jsonfield/__pycache__/subclassing.cpython-36.pyc,, +jsonfield/__pycache__/tests.cpython-36.pyc,, +jsonfield/encoder.py,sha256=LamzI8S3leLOW0RG80-YUb2oxZxtUhuHXpKogYadL2w,2306 +jsonfield/fields.py,sha256=09LBgXPcTxo6-rDwVewn8NhwtZ9yWe1DpoZehGVAcjk,6064 +jsonfield/models.py,sha256=yXIA5LSYKowbs8bQWcU1TJ4Yc10MdEP5zWS5QD5P38E,43 +jsonfield/subclassing.py,sha256=g1aGtzSlz3OisbQZgRtjDXVvQEqh0jNcMvZvBWHZ_ow,2236 +jsonfield/tests.py,sha256=c0E2QV2l2gc3OLTVEOsePHl8Nt3gdaTcU02ShZksAOo,14949 diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/metadata.json new file mode 100644 index 0000000..ee61bdf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Environment :: Web Environment", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Framework :: Django"], "extensions": {"python.details": {"contacts": [{"email": "dmkoch@gmail.com", "name": "Dan Koch", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/dmkoch/django-jsonfield/"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "MIT", "metadata_version": "2.0", "name": "jsonfield", "run_requires": [{"requires": ["Django (>=1.8.0)"]}], "summary": "A reusable Django field that allows you to store validated JSON in your model.", "test_requires": [{"requires": ["Django (>=1.8.0)"]}], "version": "2.0.2"} \ No newline at end of file diff --git 
a/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/top_level.txt new file mode 100644 index 0000000..4fdcfcf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield-2.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +jsonfield diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/__init__.py b/thesisenv/lib/python3.6/site-packages/jsonfield/__init__.py new file mode 100644 index 0000000..54360e2 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/__init__.py @@ -0,0 +1 @@ +from .fields import JSONField, JSONCharField # noqa diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/encoder.py b/thesisenv/lib/python3.6/site-packages/jsonfield/encoder.py new file mode 100644 index 0000000..4923a90 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/encoder.py @@ -0,0 +1,58 @@ +from django.db.models.query import QuerySet +from django.utils import six, timezone +from django.utils.encoding import force_text +from django.utils.functional import Promise +import datetime +import decimal +import json +import uuid + + +class JSONEncoder(json.JSONEncoder): + """ + JSONEncoder subclass that knows how to encode date/time/timedelta, + decimal types, generators and other basic python objects. + + Taken from https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/utils/encoders.py + """ + def default(self, obj): # noqa + # For Date Time string spec, see ECMA 262 + # http://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15 + if isinstance(obj, Promise): + return force_text(obj) + elif isinstance(obj, datetime.datetime): + representation = obj.isoformat() + if obj.microsecond: + representation = representation[:23] + representation[26:] + if representation.endswith('+00:00'): + representation = representation[:-6] + 'Z' + return representation + elif isinstance(obj, datetime.date): + return obj.isoformat() + elif isinstance(obj, datetime.time): + if timezone and timezone.is_aware(obj): + raise ValueError("JSON can't represent timezone-aware times.") + representation = obj.isoformat() + if obj.microsecond: + representation = representation[:12] + return representation + elif isinstance(obj, datetime.timedelta): + return six.text_type(obj.total_seconds()) + elif isinstance(obj, decimal.Decimal): + # Serializers will coerce decimals to strings by default. + return float(obj) + elif isinstance(obj, uuid.UUID): + return six.text_type(obj) + elif isinstance(obj, QuerySet): + return tuple(obj) + elif hasattr(obj, 'tolist'): + # Numpy arrays and array scalars. 
+ return obj.tolist() + elif hasattr(obj, '__getitem__'): + try: + return dict(obj) + except: + pass + elif hasattr(obj, '__iter__'): + return tuple(item for item in obj) + return super(JSONEncoder, self).default(obj) diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/fields.py b/thesisenv/lib/python3.6/site-packages/jsonfield/fields.py new file mode 100644 index 0000000..21a6e23 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/fields.py @@ -0,0 +1,183 @@ +import copy +from django.db import models +from django.utils.translation import ugettext_lazy as _ +try: + from django.utils import six +except ImportError: + import six + +try: + import json +except ImportError: + from django.utils import simplejson as json + +from django.forms import fields +try: + from django.forms.utils import ValidationError +except ImportError: + from django.forms.util import ValidationError + +from .subclassing import SubfieldBase +from .encoder import JSONEncoder + + +class JSONFormFieldBase(object): + def __init__(self, *args, **kwargs): + self.load_kwargs = kwargs.pop('load_kwargs', {}) + super(JSONFormFieldBase, self).__init__(*args, **kwargs) + + def to_python(self, value): + if isinstance(value, six.string_types) and value: + try: + return json.loads(value, **self.load_kwargs) + except ValueError: + raise ValidationError(_("Enter valid JSON")) + return value + + def clean(self, value): + + if not value and not self.required: + return None + + # Trap cleaning errors & bubble them up as JSON errors + try: + return super(JSONFormFieldBase, self).clean(value) + except TypeError: + raise ValidationError(_("Enter valid JSON")) + + +class JSONFormField(JSONFormFieldBase, fields.CharField): + pass + + +class JSONCharFormField(JSONFormFieldBase, fields.CharField): + pass + + +class JSONFieldBase(six.with_metaclass(SubfieldBase, models.Field)): + + def __init__(self, *args, **kwargs): + self.dump_kwargs = kwargs.pop('dump_kwargs', { + 'cls': JSONEncoder, + 'separators': (',', ':') + }) + self.load_kwargs = kwargs.pop('load_kwargs', {}) + + super(JSONFieldBase, self).__init__(*args, **kwargs) + + def pre_init(self, value, obj): + """Convert a string value to JSON only if it needs to be deserialized. + + SubfieldBase metaclass has been modified to call this method instead of + to_python so that we can check the obj state and determine if it needs to be + deserialized""" + + try: + if obj._state.adding: + # Make sure the primary key actually exists on the object before + # checking if it's empty. 
This is a special case for South datamigrations + # see: https://github.com/bradjasper/django-jsonfield/issues/52 + if getattr(obj, "pk", None) is not None: + if isinstance(value, six.string_types): + try: + return json.loads(value, **self.load_kwargs) + except ValueError: + raise ValidationError(_("Enter valid JSON")) + + except AttributeError: + # south fake meta class doesn't create proper attributes + # see this: + # https://github.com/bradjasper/django-jsonfield/issues/52 + pass + + return value + + def to_python(self, value): + """The SubfieldBase metaclass calls pre_init instead of to_python, however to_python + is still necessary for Django's deserializer""" + return value + + def get_prep_value(self, value): + """Convert JSON object to a string""" + if self.null and value is None: + return None + return json.dumps(value, **self.dump_kwargs) + + def _get_val_from_obj(self, obj): + # This function created to replace Django deprecated version + # https://code.djangoproject.com/ticket/24716 + if obj is not None: + return getattr(obj, self.attname) + else: + return self.get_default() + + def value_to_string(self, obj): + value = self._get_val_from_obj(obj) + return self.get_db_prep_value(value, None) + + def value_from_object(self, obj): + value = super(JSONFieldBase, self).value_from_object(obj) + if self.null and value is None: + return None + return self.dumps_for_display(value) + + def dumps_for_display(self, value): + return json.dumps(value, **self.dump_kwargs) + + def formfield(self, **kwargs): + + if "form_class" not in kwargs: + kwargs["form_class"] = self.form_class + + field = super(JSONFieldBase, self).formfield(**kwargs) + + if isinstance(field, JSONFormFieldBase): + field.load_kwargs = self.load_kwargs + + if not field.help_text: + field.help_text = "Enter valid JSON" + + return field + + def get_default(self): + """ + Returns the default value for this field. + + The default implementation on models.Field calls force_unicode + on the default, which means you can't set arbitrary Python + objects as the default. To fix this, we just return the value + without calling force_unicode on it. Note that if you set a + callable as a default, the field will still call it. It will + *not* try to pickle and encode it. + + """ + if self.has_default(): + if callable(self.default): + return self.default() + return copy.deepcopy(self.default) + # If the field doesn't have a default, then we punt to models.Field. + return super(JSONFieldBase, self).get_default() + + +class JSONField(JSONFieldBase, models.TextField): + """JSONField is a generic textfield that serializes/deserializes JSON objects""" + form_class = JSONFormField + + def dumps_for_display(self, value): + kwargs = {"indent": 2} + kwargs.update(self.dump_kwargs) + return json.dumps(value, **kwargs) + + +class JSONCharField(JSONFieldBase, models.CharField): + """JSONCharField is a generic textfield that serializes/deserializes JSON objects, + stored in the database like a CharField, which enables it to be used + e.g. 
in unique keys""" + form_class = JSONCharFormField + + +try: + from south.modelsinspector import add_introspection_rules + add_introspection_rules([], ["^jsonfield\.fields\.(JSONField|JSONCharField)"]) +except ImportError: + pass diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/models.py b/thesisenv/lib/python3.6/site-packages/jsonfield/models.py new file mode 100644 index 0000000..e5faf1b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/models.py @@ -0,0 +1 @@ +# Django needs this to see it as a project diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/subclassing.py b/thesisenv/lib/python3.6/site-packages/jsonfield/subclassing.py new file mode 100644 index 0000000..49e30e1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/subclassing.py @@ -0,0 +1,62 @@ +# This file was copied from django.db.models.fields.subclassing so that we could +# change the Creator.__set__ behavior. Read the comment below for full details. + +""" +Convenience routines for creating non-trivial Field subclasses, as well as +backwards compatibility utilities. + +Add SubfieldBase as the __metaclass__ for your Field subclass, implement +to_python() and the other necessary methods and everything will work seamlessly. +""" + + +class SubfieldBase(type): + """ + A metaclass for custom Field subclasses. This ensures the model's attribute + has the descriptor protocol attached to it. + """ + def __new__(cls, name, bases, attrs): + new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs) + new_class.contribute_to_class = make_contrib( + new_class, attrs.get('contribute_to_class') + ) + return new_class + + +class Creator(object): + """ + A placeholder class that provides a way to set the attribute on the model. + """ + def __init__(self, field): + self.field = field + + def __get__(self, obj, type=None): + if obj is None: + return self + return obj.__dict__[self.field.name] + + def __set__(self, obj, value): + # Usually this would call to_python, but we've changed it to pre_init + # so that we can tell which state we're in. By passing an obj, + # we can definitively tell if a value has already been deserialized + # More: https://github.com/bradjasper/django-jsonfield/issues/33 + obj.__dict__[self.field.name] = self.field.pre_init(value, obj) + + +def make_contrib(superclass, func=None): + """ + Returns a suitable contribute_to_class() method for the Field subclass. + + If 'func' is passed in, it is the existing contribute_to_class() method on + the subclass and it is called before anything else. It is assumed in this + case that the existing contribute_to_class() calls all the necessary + superclass methods. 
+ """ + def contribute_to_class(self, cls, name): + if func: + func(self, cls, name) + else: + super(superclass, self).contribute_to_class(cls, name) + setattr(cls, self.name, Creator(self)) + + return contribute_to_class diff --git a/thesisenv/lib/python3.6/site-packages/jsonfield/tests.py b/thesisenv/lib/python3.6/site-packages/jsonfield/tests.py new file mode 100644 index 0000000..2d0b980 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/jsonfield/tests.py @@ -0,0 +1,392 @@ +from decimal import Decimal +import django +from django import forms +from django.core.serializers import deserialize, serialize +from django.core.serializers.base import DeserializationError +from django.contrib.contenttypes.fields import GenericForeignKey +from django.contrib.contenttypes.models import ContentType +from django.db import models +from django.test import TestCase +try: + import json +except ImportError: + from django.utils import simplejson as json + +from .fields import JSONField, JSONCharField +try: + from django.forms.utils import ValidationError +except ImportError: + from django.forms.util import ValidationError + +from django.utils.six import string_types + +from collections import OrderedDict + + +class JsonModel(models.Model): + json = JSONField() + default_json = JSONField(default={"check": 12}) + complex_default_json = JSONField(default=[{"checkcheck": 1212}]) + empty_default = JSONField(default={}) + + +class GenericForeignKeyObj(models.Model): + name = models.CharField('Foreign Obj', max_length=255, null=True) + + +class JSONModelWithForeignKey(models.Model): + json = JSONField(null=True) + foreign_obj = GenericForeignKey() + object_id = models.PositiveIntegerField(blank=True, null=True, db_index=True) + content_type = models.ForeignKey(ContentType, blank=True, null=True, + on_delete=models.CASCADE) + + +class JsonCharModel(models.Model): + json = JSONCharField(max_length=100) + default_json = JSONCharField(max_length=100, default={"check": 34}) + + +class ComplexEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, complex): + return { + '__complex__': True, + 'real': obj.real, + 'imag': obj.imag, + } + + return json.JSONEncoder.default(self, obj) + + +def as_complex(dct): + if '__complex__' in dct: + return complex(dct['real'], dct['imag']) + return dct + + +class JSONModelCustomEncoders(models.Model): + # A JSON field that can store complex numbers + json = JSONField( + dump_kwargs={'cls': ComplexEncoder, "indent": 4}, + load_kwargs={'object_hook': as_complex}, + ) + + +class JSONModelWithForeignKeyTestCase(TestCase): + def test_object_create(self): + foreign_obj = GenericForeignKeyObj.objects.create(name='Brain') + JSONModelWithForeignKey.objects.create(foreign_obj=foreign_obj) + + +class JSONFieldTest(TestCase): + """JSONField Wrapper Tests""" + + json_model = JsonModel + + def test_json_field_create(self): + """Test saving a JSON object in our JSONField""" + json_obj = { + "item_1": "this is a json blah", + "blergh": "hey, hey, hey"} + + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_string_in_json_field(self): + """Test saving an ordinary Python string in our JSONField""" + json_obj = 'blah blah' + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_float_in_json_field(self): + """Test saving a Python float in our JSONField""" + 
json_obj = 1.23 + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_int_in_json_field(self): + """Test saving a Python integer in our JSONField""" + json_obj = 1234567 + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_decimal_in_json_field(self): + """Test saving a Python Decimal in our JSONField""" + json_obj = Decimal(12.34) + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + # here we must know to convert the returned string back to Decimal, + # since json does not support that format + self.assertEqual(Decimal(new_obj.json), json_obj) + + def test_json_field_modify(self): + """Test modifying a JSON object in our JSONField""" + json_obj_1 = {'a': 1, 'b': 2} + json_obj_2 = {'a': 3, 'b': 4} + + obj = self.json_model.objects.create(json=json_obj_1) + self.assertEqual(obj.json, json_obj_1) + obj.json = json_obj_2 + + self.assertEqual(obj.json, json_obj_2) + obj.save() + self.assertEqual(obj.json, json_obj_2) + + self.assertTrue(obj) + + def test_json_field_load(self): + """Test loading a JSON object from the DB""" + json_obj_1 = {'a': 1, 'b': 2} + obj = self.json_model.objects.create(json=json_obj_1) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj_1) + + def test_json_list(self): + """Test storing a JSON list""" + json_obj = ["my", "list", "of", 1, "objs", {"hello": "there"}] + + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + self.assertEqual(new_obj.json, json_obj) + + def test_empty_objects(self): + """Test storing empty objects""" + for json_obj in [{}, [], 0, '', False]: + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + self.assertEqual(json_obj, obj.json) + self.assertEqual(json_obj, new_obj.json) + + def test_custom_encoder(self): + """Test encoder_cls and object_hook""" + value = 1 + 3j # A complex number + + obj = JSONModelCustomEncoders.objects.create(json=value) + new_obj = JSONModelCustomEncoders.objects.get(pk=obj.pk) + self.assertEqual(value, new_obj.json) + + def test_django_serializers(self): + """Test serializing/deserializing jsonfield data""" + for json_obj in [{}, [], 0, '', False, {'key': 'value', 'num': 42, + 'ary': list(range(5)), + 'dict': {'k': 'v'}}]: + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + self.assert_(new_obj) + + queryset = self.json_model.objects.all() + ser = serialize('json', queryset) + for dobj in deserialize('json', ser): + obj = dobj.object + pulled = self.json_model.objects.get(id=obj.pk) + self.assertEqual(obj.json, pulled.json) + + def test_default_parameters(self): + """Test providing a default value to the model""" + model = JsonModel() + model.json = {"check": 12} + self.assertEqual(model.json, {"check": 12}) + self.assertEqual(type(model.json), dict) + + self.assertEqual(model.default_json, {"check": 12}) + self.assertEqual(type(model.default_json), dict) + + def test_invalid_json(self): + # invalid json data {] in the json and default_json fields + ser = '[{"pk": 1, "model": "jsonfield.jsoncharmodel", ' \ + '"fields": {"json": "{]", "default_json": "{]"}}]' + with self.assertRaises(DeserializationError) as cm: + next(deserialize('json', ser)) + # 
Django 2.0+ uses PEP 3134 exception chaining + if django.VERSION < (2, 0,): + inner = cm.exception.args[0] + else: + inner = cm.exception.__context__ + self.assertTrue(isinstance(inner, ValidationError)) + self.assertEqual('Enter valid JSON', inner.messages[0]) + + def test_integer_in_string_in_json_field(self): + """Test saving the Python string '123' in our JSONField""" + json_obj = '123' + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_boolean_in_string_in_json_field(self): + """Test saving the Python string 'true' in our JSONField""" + json_obj = 'true' + obj = self.json_model.objects.create(json=json_obj) + new_obj = self.json_model.objects.get(id=obj.id) + + self.assertEqual(new_obj.json, json_obj) + + def test_pass_by_reference_pollution(self): + """Make sure the default parameter is copied rather than passed by reference""" + model = JsonModel() + model.default_json["check"] = 144 + model.complex_default_json[0]["checkcheck"] = 144 + self.assertEqual(model.default_json["check"], 144) + self.assertEqual(model.complex_default_json[0]["checkcheck"], 144) + + # Make sure when we create a new model, it resets to the default value + # and not to what we just set it to (it would be if it were passed by reference) + model = JsonModel() + self.assertEqual(model.default_json["check"], 12) + self.assertEqual(model.complex_default_json[0]["checkcheck"], 1212) + + def test_normal_regex_filter(self): + """Make sure JSON model can filter regex""" + + JsonModel.objects.create(json={"boom": "town"}) + JsonModel.objects.create(json={"move": "town"}) + JsonModel.objects.create(json={"save": "town"}) + + self.assertEqual(JsonModel.objects.count(), 3) + + self.assertEqual(JsonModel.objects.filter(json__regex=r"boom").count(), 1) + self.assertEqual(JsonModel.objects.filter(json__regex=r"town").count(), 3) + + def test_save_blank_object(self): + """Test that JSON model can save a blank object as none""" + + model = JsonModel() + self.assertEqual(model.empty_default, {}) + + model.save() + self.assertEqual(model.empty_default, {}) + + model1 = JsonModel(empty_default={"hey": "now"}) + self.assertEqual(model1.empty_default, {"hey": "now"}) + + model1.save() + self.assertEqual(model1.empty_default, {"hey": "now"}) + + +class JSONCharFieldTest(JSONFieldTest): + json_model = JsonCharModel + + +class OrderedJsonModel(models.Model): + json = JSONField(load_kwargs={'object_pairs_hook': OrderedDict}) + + +class OrderedDictSerializationTest(TestCase): + def setUp(self): + self.ordered_dict = OrderedDict([ + ('number', [1, 2, 3, 4]), + ('notes', True), + ('alpha', True), + ('romeo', True), + ('juliet', True), + ('bravo', True), + ]) + self.expected_key_order = ['number', 'notes', 'alpha', 'romeo', 'juliet', 'bravo'] + + def test_ordered_dict_differs_from_normal_dict(self): + self.assertEqual(list(self.ordered_dict.keys()), self.expected_key_order) + self.assertNotEqual(dict(self.ordered_dict).keys(), self.expected_key_order) + + def test_default_behaviour_loses_sort_order(self): + mod = JsonModel.objects.create(json=self.ordered_dict) + self.assertEqual(list(mod.json.keys()), self.expected_key_order) + mod_from_db = JsonModel.objects.get(id=mod.id) + + # mod_from_db lost ordering information during json.loads() + self.assertNotEqual(mod_from_db.json.keys(), self.expected_key_order) + + def test_load_kwargs_hook_does_not_lose_sort_order(self): + mod = 
OrderedJsonModel.objects.create(json=self.ordered_dict) + self.assertEqual(list(mod.json.keys()), self.expected_key_order) + mod_from_db = OrderedJsonModel.objects.get(id=mod.id) + self.assertEqual(list(mod_from_db.json.keys()), self.expected_key_order) + + +class JsonNotRequiredModel(models.Model): + json = JSONField(blank=True, null=True) + + +class JsonNotRequiredForm(forms.ModelForm): + class Meta: + model = JsonNotRequiredModel + fields = '__all__' + + +class JsonModelFormTest(TestCase): + def test_blank_form(self): + form = JsonNotRequiredForm(data={'json': ''}) + self.assertFalse(form.has_changed()) + + def test_form_with_data(self): + form = JsonNotRequiredForm(data={'json': '{}'}) + self.assertTrue(form.has_changed()) + + +class TestFieldAPIMethods(TestCase): + def test_get_db_prep_value_method_with_null(self): + json_field_instance = JSONField(null=True) + value = {'a': 1} + prepared_value = json_field_instance.get_db_prep_value( + value, connection=None, prepared=False) + self.assertIsInstance(prepared_value, string_types) + self.assertDictEqual(value, json.loads(prepared_value)) + self.assertIs(json_field_instance.get_db_prep_value( + None, connection=None, prepared=True), None) + self.assertIs(json_field_instance.get_db_prep_value( + None, connection=None, prepared=False), None) + + def test_get_db_prep_value_method_with_not_null(self): + json_field_instance = JSONField(null=False) + value = {'a': 1} + prepared_value = json_field_instance.get_db_prep_value( + value, connection=None, prepared=False) + self.assertIsInstance(prepared_value, string_types) + self.assertDictEqual(value, json.loads(prepared_value)) + self.assertIs(json_field_instance.get_db_prep_value( + None, connection=None, prepared=True), None) + self.assertEqual(json_field_instance.get_db_prep_value( + None, connection=None, prepared=False), 'null') + + def test_get_db_prep_value_method_skips_prepared_values(self): + json_field_instance = JSONField(null=False) + value = {'a': 1} + prepared_value = json_field_instance.get_db_prep_value( + value, connection=None, prepared=True) + self.assertIs(prepared_value, value) + + def test_get_prep_value_always_json_dumps_if_not_null(self): + json_field_instance = JSONField(null=False) + value = {'a': 1} + prepared_value = json_field_instance.get_prep_value(value) + self.assertIsInstance(prepared_value, string_types) + self.assertDictEqual(value, json.loads(prepared_value)) + already_json = json.dumps(value) + double_prepared_value = json_field_instance.get_prep_value( + already_json) + self.assertDictEqual(value, + json.loads(json.loads(double_prepared_value))) + self.assertEqual(json_field_instance.get_prep_value(None), 'null') + + def test_get_prep_value_can_return_none_if_null(self): + json_field_instance = JSONField(null=True) + value = {'a': 1} + prepared_value = json_field_instance.get_prep_value(value) + self.assertIsInstance(prepared_value, string_types) + self.assertDictEqual(value, json.loads(prepared_value)) + already_json = json.dumps(value) + double_prepared_value = json_field_instance.get_prep_value( + already_json) + self.assertDictEqual(value, + json.loads(json.loads(double_prepared_value))) + self.assertIs(json_field_instance.get_prep_value(None), None) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/__init__.py b/thesisenv/lib/python3.6/site-packages/post_office/__init__.py new file mode 100644 index 0000000..2a93401 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/__init__.py @@ -0,0 +1,5 @@ +VERSION = (3, 1, 0) + +from 
.backends import EmailBackend + +default_app_config = 'post_office.apps.PostOfficeConfig' diff --git a/thesisenv/lib/python3.6/site-packages/post_office/admin.py b/thesisenv/lib/python3.6/site-packages/post_office/admin.py new file mode 100644 index 0000000..dc6c277 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/admin.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from django import forms +from django.db import models +from django.contrib import admin +from django.conf import settings +from django.forms.widgets import TextInput +from django.utils import six +from django.utils.text import Truncator +from django.utils.translation import ugettext_lazy as _ + +from .fields import CommaSeparatedEmailField +from .models import Attachment, Log, Email, EmailTemplate, STATUS + + +def get_message_preview(instance): + return (u'{0}...'.format(instance.message[:25]) if len(instance.message) > 25 + else instance.message) + +get_message_preview.short_description = 'Message' + + +class LogInline(admin.StackedInline): + model = Log + extra = 0 + + +class CommaSeparatedEmailWidget(TextInput): + + def __init__(self, *args, **kwargs): + super(CommaSeparatedEmailWidget, self).__init__(*args, **kwargs) + self.attrs.update({'class': 'vTextField'}) + + def _format_value(self, value): + # If the value is a string wrap it in a list so it does not get sliced. + if not value: + return '' + if isinstance(value, six.string_types): + value = [value, ] + return ','.join([item for item in value]) + + +def requeue(modeladmin, request, queryset): + """An admin action to requeue emails.""" + queryset.update(status=STATUS.queued) + + +requeue.short_description = 'Requeue selected emails' + + +class EmailAdmin(admin.ModelAdmin): + list_display = ('id', 'to_display', 'subject', 'template', + 'status', 'last_updated') + search_fields = ['to', 'subject'] + date_hierarchy = 'last_updated' + inlines = [LogInline] + list_filter = ['status'] + formfield_overrides = { + CommaSeparatedEmailField: {'widget': CommaSeparatedEmailWidget} + } + actions = [requeue] + + def get_queryset(self, request): + return super(EmailAdmin, self).get_queryset(request).select_related('template') + + def to_display(self, instance): + return ', '.join(instance.to) + + to_display.short_description = 'to' + to_display.admin_order_field = 'to' + + +class LogAdmin(admin.ModelAdmin): + list_display = ('date', 'email', 'status', get_message_preview) + + +class SubjectField(TextInput): + def __init__(self, *args, **kwargs): + super(SubjectField, self).__init__(*args, **kwargs) + self.attrs.update({'style': 'width: 610px;'}) + + +class EmailTemplateAdminForm(forms.ModelForm): + + language = forms.ChoiceField(choices=settings.LANGUAGES, required=False, + help_text=_("Render template in alternative language"), + label=_("Language")) + + class Meta: + model = EmailTemplate + fields = ('name', 'description', 'subject', + 'content', 'html_content', 'language', 'default_template') + + +class EmailTemplateInline(admin.StackedInline): + form = EmailTemplateAdminForm + model = EmailTemplate + extra = 0 + fields = ('language', 'subject', 'content', 'html_content',) + formfield_overrides = { + models.CharField: {'widget': SubjectField} + } + + def get_max_num(self, request, obj=None, **kwargs): + return len(settings.LANGUAGES) + + +class EmailTemplateAdmin(admin.ModelAdmin): + form = EmailTemplateAdminForm + list_display = ('name', 'description_shortened', 'subject', 'languages_compact', 'created') + 
search_fields = ('name', 'description', 'subject') + fieldsets = [ + (None, { + 'fields': ('name', 'description'), + }), + (_("Default Content"), { + 'fields': ('subject', 'content', 'html_content'), + }), + ] + inlines = (EmailTemplateInline,) if settings.USE_I18N else () + formfield_overrides = { + models.CharField: {'widget': SubjectField} + } + + def get_queryset(self, request): + return self.model.objects.filter(default_template__isnull=True) + + def description_shortened(self, instance): + return Truncator(instance.description.split('\n')[0]).chars(200) + description_shortened.short_description = _("Description") + description_shortened.admin_order_field = 'description' + + def languages_compact(self, instance): + languages = [tt.language for tt in instance.translated_templates.order_by('language')] + return ', '.join(languages) + languages_compact.short_description = _("Languages") + + def save_model(self, request, obj, form, change): + obj.save() + + # If the name was changed, also rename the translated templates to match + if 'name' in form.changed_data: + obj.translated_templates.update(name=obj.name) + + +class AttachmentAdmin(admin.ModelAdmin): + list_display = ('name', 'file', ) + + +admin.site.register(Email, EmailAdmin) +admin.site.register(Log, LogAdmin) +admin.site.register(EmailTemplate, EmailTemplateAdmin) +admin.site.register(Attachment, AttachmentAdmin) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/apps.py b/thesisenv/lib/python3.6/site-packages/post_office/apps.py new file mode 100644 index 0000000..62cbb31 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig +from django.utils.translation import ugettext_lazy as _ + + +class PostOfficeConfig(AppConfig): + name = 'post_office' + verbose_name = _("Post Office") diff --git a/thesisenv/lib/python3.6/site-packages/post_office/backends.py b/thesisenv/lib/python3.6/site-packages/post_office/backends.py new file mode 100644 index 0000000..8aa1a9b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/backends.py @@ -0,0 +1,56 @@ +from django.core.files.base import ContentFile +from django.core.mail.backends.base import BaseEmailBackend + +from .settings import get_default_priority + + +class EmailBackend(BaseEmailBackend): + + def open(self): + pass + + def close(self): + pass + + def send_messages(self, email_messages): + """ + Queue one or more EmailMessage objects; messages are stored for later delivery rather than sent immediately.
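        A rough usage sketch (the settings value is the dotted path this
        package exposes; addresses are illustrative): with

            EMAIL_BACKEND = 'post_office.EmailBackend'

        in settings.py, a plain Django call such as

            from django.core.mail import send_mail
            send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])

        lands here and is stored as a queued Email row via create() below,
        unless the configured default priority is 'now', in which case the
        email is dispatched immediately.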
+ """ + from .mail import create + from .utils import create_attachments + + if not email_messages: + return + + for email_message in email_messages: + subject = email_message.subject + from_email = email_message.from_email + message = email_message.body + headers = email_message.extra_headers + + # Check whether email has 'text/html' alternative + alternatives = getattr(email_message, 'alternatives', ()) + for alternative in alternatives: + if alternative[1].startswith('text/html'): + html_message = alternative[0] + break + else: + html_message = '' + + attachment_files = dict([(name, ContentFile(content)) + for name, content, _ in email_message.attachments]) + + email = create(sender=from_email, + recipients=email_message.to, cc=email_message.cc, + bcc=email_message.bcc, subject=subject, + message=message, html_message=html_message, + headers=headers) + + if attachment_files: + attachments = create_attachments(attachment_files) + + email.attachments.add(*attachments) + + if get_default_priority() == 'now': + email.dispatch() diff --git a/thesisenv/lib/python3.6/site-packages/post_office/cache.py b/thesisenv/lib/python3.6/site-packages/post_office/cache.py new file mode 100644 index 0000000..232b0bf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/cache.py @@ -0,0 +1,26 @@ +from django.template.defaultfilters import slugify + +from .settings import get_cache_backend + +# Stripped down version of caching functions from django-dbtemplates +# https://github.com/jezdez/django-dbtemplates/blob/develop/dbtemplates/utils/cache.py +cache_backend = get_cache_backend() + + +def get_cache_key(name): + """ + Prefixes and slugify the key name + """ + return 'post_office:template:%s' % (slugify(name)) + + +def set(name, content): + return cache_backend.set(get_cache_key(name), content) + + +def get(name): + return cache_backend.get(get_cache_key(name)) + + +def delete(name): + return cache_backend.delete(get_cache_key(name)) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/compat.py b/thesisenv/lib/python3.6/site-packages/post_office/compat.py new file mode 100644 index 0000000..a7e6dca --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/compat.py @@ -0,0 +1,46 @@ +try: + import importlib +except ImportError: + from django.utils import importlib + +try: + from logging.config import dictConfig # Python >= 2.7 +except ImportError: + from django.utils.log import dictConfig # Django <= 1.9 + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +if PY3: + string_types = str + text_type = str +else: + string_types = basestring + text_type = unicode + + +try: + from django.core.cache import caches # Django >= 1.7 + + def get_cache(name): + return caches[name] +except ImportError: + from django.core.cache import get_cache + + +try: + from django.utils.encoding import smart_text # For Django >= 1.5 +except ImportError: + from django.utils.encoding import smart_unicode as smart_text + + +# Django 1.4 doesn't have ``import_string`` or ``import_by_path`` +def import_attribute(name): + """Return an attribute from a dotted path name (e.g. 
"path.to.func").""" + module_name, attribute = name.rsplit('.', 1) + module = importlib.import_module(module_name) + return getattr(module, attribute) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/connections.py b/thesisenv/lib/python3.6/site-packages/post_office/connections.py new file mode 100644 index 0000000..088ef45 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/connections.py @@ -0,0 +1,44 @@ +from threading import local + +from django.core.mail import get_connection + +from .settings import get_backend + + +# Copied from Django 1.8's django.core.cache.CacheHandler +class ConnectionHandler(object): + """ + A Cache Handler to manage access to Cache instances. + + Ensures only one instance of each alias exists per thread. + """ + def __init__(self): + self._connections = local() + + def __getitem__(self, alias): + try: + return self._connections.connections[alias] + except AttributeError: + self._connections.connections = {} + except KeyError: + pass + + try: + backend = get_backend(alias) + except KeyError: + raise KeyError('%s is not a valid backend alias' % alias) + + connection = get_connection(backend) + connection.open() + self._connections.connections[alias] = connection + return connection + + def all(self): + return getattr(self._connections, 'connections', {}).values() + + def close(self): + for connection in self.all(): + connection.close() + + +connections = ConnectionHandler() diff --git a/thesisenv/lib/python3.6/site-packages/post_office/fields.py b/thesisenv/lib/python3.6/site-packages/post_office/fields.py new file mode 100644 index 0000000..16b42ed --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/fields.py @@ -0,0 +1,58 @@ +from django.db.models import TextField +from django.utils import six +from django.utils.translation import ugettext_lazy as _ + +from .validators import validate_comma_separated_emails + + +class CommaSeparatedEmailField(TextField): + default_validators = [validate_comma_separated_emails] + description = _("Comma-separated emails") + + def __init__(self, *args, **kwargs): + kwargs['blank'] = True + super(CommaSeparatedEmailField, self).__init__(*args, **kwargs) + + def formfield(self, **kwargs): + defaults = { + 'error_messages': { + 'invalid': _('Only comma separated emails are allowed.'), + } + } + defaults.update(kwargs) + return super(CommaSeparatedEmailField, self).formfield(**defaults) + + def from_db_value(self, value, expression, connection, context): + return self.to_python(value) + + def get_prep_value(self, value): + """ + We need to accomodate queries where a single email, + or list of email addresses is supplied as arguments. For example: + + - Email.objects.filter(to='mail@example.com') + - Email.objects.filter(to=['one@example.com', 'two@example.com']) + """ + if isinstance(value, six.string_types): + return value + else: + return ', '.join(map(lambda s: s.strip(), value)) + + def to_python(self, value): + if isinstance(value, six.string_types): + if value == '': + return [] + else: + return [s.strip() for s in value.split(',')] + else: + return value + + def south_field_triple(self): + """ + Return a suitable description of this field for South. 
+ Taken from smiley chris' easy_thumbnails + """ + from south.modelsinspector import introspector + field_class = 'django.db.models.fields.TextField' + args, kwargs = introspector(self) + return (field_class, args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/locale/de/LC_MESSAGES/django.mo b/thesisenv/lib/python3.6/site-packages/post_office/locale/de/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..f3976591c060b6fba90eaa800320b85e29160c8b GIT binary patch literal 1632 zcmaKr&yUw&_lb9yX zU^`ukIB@31fg4wDoVXz&ap1&}e*g)I1N;G8%7yQnNxP*;SaCkK^ZefT*)PAIIr)`9 zJBR)X`tRs(pnv-iUT9kn3vmJrKs+ME>qT%EJO$nWp8!+vRnUPA@JsM9@N4i%@SB?7 zfsbSUBgp%I2G4`PfGgmgdi`|-eF5{gz~{j&@KJCdJPBS0@rbBir+S{(^Et?NN8nB5 z@)^iF{=niR;9nr?{TpPz=dj4%CHOL^K-SpJBhK(_CI9RC_vU&&%0-@#dQ9M`?}EJmCI z`+EwV<8mz4dm5b^Yh{1joR^fvM3_u zXdcJ^lBLzDHc>8TIwunqB&eCT<;oG|2uU{OVj~bhqGzRyt<=wV;e{v^akN^eyhz^qb>?Hy*-C+PoMaAaQWJ7$T*9vBfq&n z-BFj0?O4`~tO`wBCfeY{FCA*%*vl&$, YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-07-06 07:47+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: admin.py:97 +#, fuzzy +msgid "Default Content" +msgstr "Inhalt" + +#: admin.py:111 +msgid "Description" +msgstr "Beschreibung" + +#: admin.py:117 +msgid "Languages" +msgstr "Sprachen" + +#: fields.py:11 +msgid "Comma-separated emails" +msgstr "Durch Kommas getrennte Emails" + +#: fields.py:20 +msgid "Only comma separated emails are allowed." +msgstr "Nur durch Kommas getrennte Emails sind erlaubt" + +#: models.py:38 +msgid "low" +msgstr "niedrig" + +#: models.py:38 +msgid "medium" +msgstr "mittel" + +#: models.py:39 +msgid "high" +msgstr "hoch" + +#: models.py:39 +msgid "now" +msgstr "sofort" + +#: models.py:40 models.py:169 +msgid "sent" +msgstr "gesendet" + +#: models.py:40 models.py:169 +msgid "failed" +msgstr "fehlgeschlagen" + +#: models.py:41 +msgid "queued" +msgstr "in der Warteschleife" + +#: models.py:44 +msgid "Email From" +msgstr "Email Von" + +#: models.py:45 +msgid "Email To" +msgstr "Email An" + +#: models.py:46 +msgid "Cc" +msgstr "Kopie" + +#: models.py:47 +msgid "Bcc" +msgstr "Blinde Kopie" + +#: models.py:48 models.py:194 +msgid "Subject" +msgstr "Betreff" + +#: models.py:49 +msgid "Message" +msgstr "Nachricht" + +#: models.py:50 +msgid "HTML Message" +msgstr "HTML Nachricht" + +#: models.py:188 +msgid "e.g: 'welcome_email'" +msgstr "z.B. 'welcome_email'" + +#: models.py:190 +msgid "Description of this template." 
+msgstr "Beschreibung dieser Vorlage" + +#: models.py:196 +msgid "Content" +msgstr "Inhalt" + +#: models.py:198 +msgid "HTML content" +msgstr "HTML Inhalt" + +#: models.py:200 +msgid "Render template in alternative language" +msgstr "Vorlage in alternativer Sprache rendern" + +#: models.py:208 +#, fuzzy +msgid "Email Template" +msgstr "Email Vorlage" + +#: models.py:209 +#, fuzzy +msgid "Email Templates" +msgstr "Email Vorlagen" + +#: models.py:236 +msgid "The original filename" +msgstr "Ursprünglicher Dateiname" diff --git a/thesisenv/lib/python3.6/site-packages/post_office/locale/it/LC_MESSAGES/django.mo b/thesisenv/lib/python3.6/site-packages/post_office/locale/it/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..2bac6397a7947ff01a2fadc58f2e8dddbfa3b1e7 GIT binary patch literal 1611 zcmZvb&u<$=6vwAfei_P-0xckZ42@7)s&$?4BgJV@9DCDPUE7iEL;@jI6YtAH%^2)+S$*}o0&K7z4`8M7teny&|XG= z75xwNx6pq$hZoxBLqa?ZHb6Wg#_JNe13m`c1)l^n@HNnb74RGIaqv6vDe(J}KY~wS z{4>b=e*v$8zkO4t){hx4{>{P4H204?GWk0^$*>v{O0G%kc!{b;sam$mPp& z{!bVmf&Fjr74RRB1pWl_ z{@=mN;9nr;aSod}?@J)Z_d19vu~hOJ$miWC`2oo5n_zh*vweIAFQKDci^g$rt$FMV=YR)!NKiGa%jILraU|WHWxXs8kpiV+th2GeX`-N1PYYr2 z`T3NlnDShdU1!;I)J~&)avrs*%iTuvPUChg_v8Mc-RoBRG_fv39iQnm+L~rQ8rr(7 z?DmFH(-A_oCTgK@b-7wwc`vH1MYZ>3ZLR+9+V$FMt%ikBpN=#;FV}q(H~LYh7quF4 zr&sT^uYcU@#+5|*5DlF&9@k)9U2g9siR?ByvAh*`8tr7W(Q5VM!QewIo!2}Xk}B#l zX>@PzamcrlWHZ-Q>$1}|2({Dh#HabKRBM$|6w&bB1aSxYBHX~`YqKs#W92-BjlJP^ zbn|o_hjmCUip|KT+Mwb$4|J&P=2NHgXxqBN*QJ@vhJIspUCuu@t{VB~hFrO}UYX0( zPPCe<%Abfs%T&oD?J}EFsn%(*rAF_gYIOEnrdr|#eNYSCoG3OeJaSxl)EEiV~ZV, YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-07-06 07:47+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: admin.py:97 +#, fuzzy +msgid "Default Content" +msgstr "Contenuto" + +#: admin.py:111 +msgid "Description" +msgstr "Descrizione" + +#: admin.py:117 +msgid "Languages" +msgstr "Lingue" + +#: fields.py:11 +msgid "Comma-separated emails" +msgstr "Emails separati da virgole" + +#: fields.py:20 +msgid "Only comma separated emails are allowed." +msgstr "Sono consentiti soltanto emails separati da virgole" + +#: models.py:38 +msgid "low" +msgstr "bassa" + +#: models.py:38 +msgid "medium" +msgstr "media" + +#: models.py:39 +msgid "high" +msgstr "alta" + +#: models.py:39 +msgid "now" +msgstr "immediata" + +#: models.py:40 models.py:169 +msgid "sent" +msgstr "inviato" + +#: models.py:40 models.py:169 +msgid "failed" +msgstr "fallito" + +#: models.py:41 +msgid "queued" +msgstr "in attesa" + +#: models.py:44 +msgid "Email From" +msgstr "Email da" + +#: models.py:45 +msgid "Email To" +msgstr "Email per" + +#: models.py:46 +msgid "Cc" +msgstr "Copia" + +#: models.py:47 +msgid "Bcc" +msgstr "Bcc" + +#: models.py:48 models.py:194 +msgid "Subject" +msgstr "Soggetto" + +#: models.py:49 +msgid "Message" +msgstr "Messaggio" + +#: models.py:50 +msgid "HTML Message" +msgstr "HTML Messaggio" + +#: models.py:188 +msgid "e.g: 'welcome_email'" +msgstr "z.B. 'welcome_email'" + +#: models.py:190 +msgid "Description of this template." +msgstr "Descrizione di questa template." 
+ +#: models.py:196 +msgid "Content" +msgstr "Contenuto" + +#: models.py:198 +msgid "HTML content" +msgstr "Contenuto in HTML" + +#: models.py:200 +msgid "Render template in alternative language" +msgstr "Rendere template in un altra lingua" + +#: models.py:208 +#, fuzzy +msgid "Email Template" +msgstr "Email Template" + +#: models.py:209 +#, fuzzy +msgid "Email Templates" +msgstr "Email Templates" + +#: models.py:236 +msgid "The original filename" +msgstr "Nome del file originale" diff --git a/thesisenv/lib/python3.6/site-packages/post_office/locale/pl/LC_MESSAGES/django.mo b/thesisenv/lib/python3.6/site-packages/post_office/locale/pl/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..6a2b5696dc04d6ddec7690839f39f33ce1eb1109 GIT binary patch literal 2698 zcmaKt&u<$=6vwAfN-^a}%dZp)4A375WfLdmM{$!-H*wlfJC0)a2N6;`-JRGI@6MRr z**I&;fl5_{0Eq*YKou7x#2?Tb2ZYpw0}?{w4hIfgxK`=`f%raqw+*FY;#A(3sBL1R#JWhlHV`E9pKj>e(W1m6u)aA#r@}0{~JjD{s3__b_1mK{{pck zmP7Dp-gc1m93=k^NdCuCmO#qyX^`f*AjNALq&S2i&07I!y>~%c?-Do$ehlKrzC$$x z{s5An>mb?v3DUjWgheRs10bFI0Ei(r3zB^eB>P2>);|l<{+l4#bwC`Ay$>SnHw)>= z-%F6EQtftd4(bC3}TopCqhPUQ(GUw}{^>5Wp^+Xo)B zwZd%%Qriv9h#e7bOKOh`Ux|oKxopY=fp8+(7NM}x<1!G+$B5CE=JvR1*|b~|v2S^% z&-zvdZ66b15V@ghTV?ddHaAP$HdVwk!(mpU75GeOg0xxBeDp`MZj=q1v>`k%lu@Qm zdz*Wc{%s*|vyoBXDpO~BVWm}9_O{Gz(w1yS`H~&0SIWGCbwnfWxM?hoN#V&bVr8Kl zF^y&os7!+-V%fww5lB|me!^V}Fuy&(T!a!y@J&a0!)!iOCR8?I3&@QOZw8Ai4I?W< zEv#CR+|O37Sz+Ud)#7u@(zUGKlpGEkN(-Ma;S`#dCvD_5r5F21Yy{UzH@H<8lEaMx z-`9~ob|=sDa=wo(AurNnP1R^JY>WlcQ*ppFn(xFi#t?j1Y}U}C&aCI0L^zP}0-rBV z9WNd&@sp*6+H7?$w; zbv}Ba@XEpc`LTQ+hR%XqQRFp~B|cuN6lcqai__B!rCMzgrv0orbt!@ZFBj*Io}ev{m&=EJHEhDhEgNjL zGFvI#I^XDUK9^p7r;e*qK#ErF2(G@;hq&7mVI=LuiTaFl@Rl8IYe|MqNxOzZU*HGN zDVv-3<52j{j0uCNz;(OVjV2Bp;{E2tpq`i*)!Nur|NR0~J+G^zZ?xp?uq$1qTOv^X z4}rwojtH$;zp{3T9jAxTitsfsL9%|u*MfI94BuH>ZL5e;g^)E2kGkR<){4^?Ydlu# zYx)a{tef=vDz){QZ_T88m*79wmVd;3X8CjnpXZ6s#t_-m^z8G;IQxNa3i90(TQH zS7>~Jx*|dlqV)?xE5URdPrq$C$_ig6+2c1}*p2a}OBgH-j;&RMdi_c$J&m5Q3I0y- Ur(sP8HoRqgx$H`^Ccg>$2l?`SZU6uP literal 0 HcmV?d00001 diff --git a/thesisenv/lib/python3.6/site-packages/post_office/locale/pl/LC_MESSAGES/django.po b/thesisenv/lib/python3.6/site-packages/post_office/locale/pl/LC_MESSAGES/django.po new file mode 100644 index 0000000..b280767 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/locale/pl/LC_MESSAGES/django.po @@ -0,0 +1,206 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2016-06-09 13:58+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " +"|| n%100>=20) ? 
1 : 2);\n" + +#: post_office/admin.py:81 post_office/models.py:200 +msgid "Render template in alternative language" +msgstr "Wygeneruj szablon w alternatywnym języku" + +#: post_office/admin.py:82 post_office/models.py:199 +msgid "Language" +msgstr "Język" + +#: post_office/admin.py:111 +msgid "Default Content" +msgstr "Domyślna zawartość" + +#: post_office/admin.py:125 post_office/models.py:188 +msgid "Description" +msgstr "Opis" + +#: post_office/admin.py:131 +msgid "Languages" +msgstr "Języki" + +#: post_office/fields.py:11 +msgid "Comma-separated emails" +msgstr "Oddzielone przecinkami emaile" + +#: post_office/fields.py:20 +msgid "Only comma separated emails are allowed." +msgstr "Tylko oddzielone przecinkami emaile są dozwolone." + +#: post_office/models.py:36 +msgid "low" +msgstr "niski" + +#: post_office/models.py:36 +msgid "medium" +msgstr "średni" + +#: post_office/models.py:37 +msgid "high" +msgstr "wysoki" + +#: post_office/models.py:37 +msgid "now" +msgstr "natychmiastowy" + +#: post_office/models.py:38 post_office/models.py:164 +msgid "sent" +msgstr "wysłany" + +#: post_office/models.py:38 post_office/models.py:164 +msgid "failed" +msgstr "odrzucony" + +#: post_office/models.py:39 +msgid "queued" +msgstr "w kolejce" + +#: post_office/models.py:41 +msgid "Email From" +msgstr "Email od" + +#: post_office/models.py:43 +msgid "Email To" +msgstr "Email do" + +#: post_office/models.py:44 +msgid "Cc" +msgstr "Cc" + +#: post_office/models.py:46 post_office/models.py:193 +msgid "Subject" +msgstr "Temat" + +#: post_office/models.py:47 post_office/models.py:171 +msgid "Message" +msgstr "Wiadomość" + +#: post_office/models.py:48 +msgid "HTML Message" +msgstr "Wiadomość w HTML" + +#: post_office/models.py:55 post_office/models.py:169 +msgid "Status" +msgstr "Status" + +#: post_office/models.py:58 +msgid "Priority" +msgstr "Priorytet" + +#: post_office/models.py:63 +msgid "The scheduled sending time" +msgstr "Zaplanowany czas wysłania" + +#: post_office/models.py:65 +msgid "Headers" +msgstr "Nagłówki" + +#: post_office/models.py:67 +msgid "Email template" +msgstr "Szablon emaila" + +#: post_office/models.py:68 +msgid "Context" +msgstr "Kontekst" + +#: post_office/models.py:69 +msgid "Backend alias" +msgstr "Backend alias" + +#: post_office/models.py:74 +msgctxt "Email address" +msgid "Email" +msgstr "Email" + +#: post_office/models.py:75 +msgctxt "Email addresses" +msgid "Emails" +msgstr "Emaile" + +#: post_office/models.py:167 +msgid "Email address" +msgstr "Adres email" + +#: post_office/models.py:170 +msgid "Exception type" +msgstr "Typ wyjątku" + +#: post_office/models.py:175 +msgid "Log" +msgstr "Log" + +#: post_office/models.py:176 +msgid "Logs" +msgstr "Logi" + +#: post_office/models.py:187 post_office/models.py:241 +msgid "Name" +msgstr "Nazwa" + +#: post_office/models.py:187 +msgid "e.g: 'welcome_email'" +msgstr "np: 'powitalny_email'" + +#: post_office/models.py:189 +msgid "Description of this template." 
+msgstr "Opis tego szablonu" + +#: post_office/models.py:195 +msgid "Content" +msgstr "Zawartość" + +#: post_office/models.py:197 +msgid "HTML content" +msgstr "Zawartość w HTML" + +#: post_office/models.py:203 +msgid "Default template" +msgstr "Domyślna zawartość" + +#: post_office/models.py:208 +msgid "Email Template" +msgstr "Szablon emaila" + +#: post_office/models.py:209 +msgid "Email Templates" +msgstr "Szablony emaili" + +#: post_office/models.py:240 +msgid "File" +msgstr "Plik" + +#: post_office/models.py:241 +msgid "The original filename" +msgstr "Oryginalna nazwa pliku" + +#: post_office/models.py:243 +msgid "Email addresses" +msgstr "Adresy email" + +#: post_office/models.py:247 +msgid "Attachment" +msgstr "Załącznik" + +#: post_office/models.py:248 +msgid "Attachments" +msgstr "Załączniki" diff --git a/thesisenv/lib/python3.6/site-packages/post_office/locale/ru_RU/LC_MESSAGES/django.mo b/thesisenv/lib/python3.6/site-packages/post_office/locale/ru_RU/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..359b3038fc7eb3683de3a94c3e5d71dc51485193 GIT binary patch literal 3274 zcmZ{kTWlQF8GuhATO zB9RM-KmtZch|r*xpdujz!Z9&+;@d-|K2!+Jy!7(W3V7h9>O-ZfFTC{o&(7G1ogVF( z?_B=>{Qvp?|Li}mTk*D{73jCp-}`}5n`L}4A6oYcrMh4b{1NPfSHg$jMQ{iFDSRAW z1{)cN;nmDX;5G0F6#aMMZSX@V=l%g+1;2tK|95yf{1+5E|Ap7X6_<4OZ-ip68;ahh zj5{Er>M3{y+>*i1^)>z zh1c^Dy_?}r;9c-$*bjdUcSC-v!AI-_Q0&Ip`bai^F`FNTBKI2n8@U*T59XJ@O3DD{0WNPzrefT*RTXHX7f(C5sKWe`AEIAvibgO{%STq4zFeZLnwA8 zA>m(WVngEY(8V8VBEOQJJr!B9J_sd;chEIm;wkOt^lm%q#Gse4#6aRG?OwXrUQO?z zQ)2c_0{aH!z1>KccOf+(aTuvb-0HK&uW7gGxslpjRhxrW%XK0%?1pY^YTC41ugOl} z$D(UF600p{$Za=cov-Kf*tCY5C?XJ5LvJ|t0{`5x4u*8x@FJZH_NuL7L2nC#RyM5U zXLNS*X_RjV*}$#ULKEf7*=&VXI$y#pkugz6DqqfhQCml<#=5NIy~9Rr^O{CIS{c}` z2e9MTvq?3J#iPcpnJ`k@UBBLz-F(1OP?sL5TyclnGHR#a+^ee+V7)ZJ+6@g!Xa+Bs zTCW-mqFC=78uF?}4TfG2dhuShi#(ZdA#mFBQHV|GyRrAY(aqe8-LV_DBelD|=NVIt zRi$Ax2h=^^ZR#Ozm^{#U6Xa%5_% z;nf=oCsoVTymm|Z44-S8Hd~w#<7^;CoyTgB&gRhIec(G=sF{*pQz))=3jI!Dtu8)T>RWqn;r>DaL1&kF-jmZVEVr&BmxsTk z!}im=p6D8QY+&o68^zv2S5~b~h3uAy4HX_`Nr~^d}~9wPli^bu%Wm{@8921*;wrBL8X|fl*seUmT+HM) zHtb(zZ?V{= zlDCrSWG?w4nMh`nsbpfwLOQCFSQcA zogPRgQkjmg){MuJ$>cPO(^h#lJ(|9(SsF{`)6w)$dNi3yrsR-0kwX3p{Fq^eFJfdi zJtUUXgXy6QN@=Xm>GZJGnNDBfU~ZDWp^}dnp0d}Q{r^J0&78MdVspuYPLC`elZ^Eu zfJ5{4UgOvi=cdzR%dAb|@G*QMROk4S9u-g6Lu*DQzvF(>>0xnt;UN1*=U_97qe$h4 z&1HXpi@S3E;{N%@GE=!59}w4hZH=GB>Kj(z$KbL#Z$Egr<`B^eaTF)*FWbFjopJ&MPeZGxOp{eA34xA*u6GR>dHIb1h z9ph~Be1UpO&Md1Z%^f;%US5=>@I8C62?~zFPY>AYNMDg+B$}9*O6KG%IgzSclJWG| zxsjdI)Z3VrbWwy7i5Z=ab|N74bdpoZE?GXPk`FL!YvMTN@iMZfQKr;Mi+yG3OPxeW zS|zaagtb%82M`;#<4j>34>?Sp<{T#SinIy9A-f, YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-08-07 16:47+0300\n" +"PO-Revision-Date: 2017-08-07 16:49+0300\n" +"Language: ru_RU\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" +"%100>=11 && n%100<=14)? 
2 : 3);\n" +"Last-Translator: \n" +"Language-Team: \n" +"X-Generator: Poedit 1.8.11\n" + +#: post_office/admin.py:83 post_office/models.py:217 +msgid "Render template in alternative language" +msgstr "Отправить письмо на другом языке" + +#: post_office/admin.py:84 post_office/models.py:216 +msgid "Language" +msgstr "Язык" + +#: post_office/admin.py:113 +msgid "Default Content" +msgstr "Содержимое по умолчанию" + +#: post_office/admin.py:127 post_office/models.py:205 +msgid "Description" +msgstr "Описание" + +#: post_office/admin.py:133 +msgid "Languages" +msgstr "Языки" + +#: post_office/apps.py:7 +msgid "Post Office" +msgstr "Менеджер почты" + +#: post_office/fields.py:10 +msgid "Comma-separated emails" +msgstr "Список адресов, разделенных запятыми" + +#: post_office/fields.py:19 +msgid "Only comma separated emails are allowed." +msgstr "Разрешен только разделенный запятыми список адресов." + +#: post_office/models.py:34 +msgid "low" +msgstr "низкий" + +#: post_office/models.py:34 +msgid "medium" +msgstr "средний" + +#: post_office/models.py:35 +msgid "high" +msgstr "высокий" + +#: post_office/models.py:35 +msgid "now" +msgstr "сейчас" + +#: post_office/models.py:36 post_office/models.py:181 +msgid "sent" +msgstr "отправлен" + +#: post_office/models.py:36 post_office/models.py:181 +msgid "failed" +msgstr "ошибка" + +#: post_office/models.py:37 +msgid "queued" +msgstr "в очереди" + +#: post_office/models.py:39 +msgid "Email From" +msgstr "Отправитель" + +#: post_office/models.py:41 +msgid "Email To" +msgstr "Получатель" + +#: post_office/models.py:42 +msgid "Cc" +msgstr "Копия" + +#: post_office/models.py:44 post_office/models.py:210 +msgid "Subject" +msgstr "Тема" + +#: post_office/models.py:45 post_office/models.py:188 +msgid "Message" +msgstr "Сообщение" + +#: post_office/models.py:46 +msgid "HTML Message" +msgstr "HTML-сообщение" + +#: post_office/models.py:53 post_office/models.py:186 +msgid "Status" +msgstr "Статус" + +#: post_office/models.py:56 +msgid "Priority" +msgstr "Приоритет" + +#: post_office/models.py:61 +msgid "The scheduled sending time" +msgstr "Запланированное время отправки" + +#: post_office/models.py:63 +msgid "Headers" +msgstr "Заголовки" + +#: post_office/models.py:65 +msgid "Email template" +msgstr "Шаблон письма" + +#: post_office/models.py:67 +msgid "Context" +msgstr "Контекст" + +#: post_office/models.py:68 +msgid "Backend alias" +msgstr "Имя бекенда" + +#: post_office/models.py:73 +msgctxt "Email address" +msgid "Email" +msgstr "Письмо" + +#: post_office/models.py:74 +msgctxt "Email addresses" +msgid "Emails" +msgstr "Письма" + +#: post_office/models.py:184 +msgid "Email address" +msgstr "Email-адрес" + +#: post_office/models.py:187 +msgid "Exception type" +msgstr "Тип исключения" + +#: post_office/models.py:192 +msgid "Log" +msgstr "Лог" + +#: post_office/models.py:193 +msgid "Logs" +msgstr "Логи" + +#: post_office/models.py:204 post_office/models.py:258 +msgid "Name" +msgstr "Имя" + +#: post_office/models.py:204 +msgid "e.g: 'welcome_email'" +msgstr "например: 'welcome_email'" + +#: post_office/models.py:206 +msgid "Description of this template." +msgstr "Описание шаблона." 
+ +#: post_office/models.py:212 +msgid "Content" +msgstr "Содержимое" + +#: post_office/models.py:214 +msgid "HTML content" +msgstr "HTML-содержимое" + +#: post_office/models.py:220 +msgid "Default template" +msgstr "Шаблон по умолчанию" + +#: post_office/models.py:225 +msgid "Email Template" +msgstr "Шаблон письма" + +#: post_office/models.py:226 +msgid "Email Templates" +msgstr "Шаблоны писем" + +#: post_office/models.py:257 +msgid "File" +msgstr "Файл" + +#: post_office/models.py:258 +msgid "The original filename" +msgstr "Исходное имя файла" + +#: post_office/models.py:260 +msgid "Email addresses" +msgstr "Email адреса" + +#: post_office/models.py:265 +msgid "Attachment" +msgstr "Вложение" + +#: post_office/models.py:266 +msgid "Attachments" +msgstr "Вложения" diff --git a/thesisenv/lib/python3.6/site-packages/post_office/lockfile.py b/thesisenv/lib/python3.6/site-packages/post_office/lockfile.py new file mode 100644 index 0000000..a9a2f81 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/lockfile.py @@ -0,0 +1,148 @@ +# This module is taken from https://gist.github.com/ionrock/3015700 + +# A file lock implementation that tries to avoid platform specific +# issues. It is inspired by a whole bunch of different implementations +# listed below. + +# - https://bitbucket.org/jaraco/yg.lockfile/src/6c448dcbf6e5/yg/lockfile/__init__.py +# - http://svn.zope.org/zc.lockfile/trunk/src/zc/lockfile/__init__.py?rev=121133&view=markup +# - http://stackoverflow.com/questions/489861/locking-a-file-in-python +# - http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python/ +# - http://packages.python.org/lockfile/lockfile.html + +# There are some tests below and a blog posting that conceptually +# describes the problems I wanted to try to solve. The tests reflect these ideas. + +# - http://ionrock.wordpress.com/2012/06/28/file-locking-in-python/ + +# I'm not advocating using this package. But if you do happen to try it +# out and have suggestions please let me know. + +import os +import time + + +class FileLocked(Exception): + pass + + +class FileLock(object): + + def __init__(self, lock_filename, timeout=None, force=False): + self.lock_filename = '%s.lock' % lock_filename + self.timeout = timeout + self.force = force + self._pid = str(os.getpid()) + # Store pid in a file in the same directory as desired lockname + self.pid_filename = os.path.join( + os.path.dirname(self.lock_filename), + self._pid, + ) + '.lock' + + def get_lock_pid(self): + try: + return int(open(self.lock_filename).read()) + except IOError: + # If we can't read the symbolic link, there are two possibilities: + # 1. The symbolic link is dead (points to a non-existing file) + # 2. The symbolic link is not there + # In either case, we can safely release the lock + self.release() + + def valid_lock(self): + """ + See if the lock exists and is left over from an old process.
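        For context, the intended entry point is the context-manager
        protocol implemented further down (the lock path shown is the
        default used by the send_queued_mail command; a sketch):

            with FileLock('/tmp/post_office', timeout=5):
                pass  # only one queue-draining process runs at a time

        __enter__() checks is_locked() and then calls acquire();
        __exit__() always calls release().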
+ """ + + lock_pid = self.get_lock_pid() + + # If we're unable to get lock_pid + if lock_pid is None: + return False + + # this is our process + if self._pid == lock_pid: + return True + + # it is/was another process + # see if it is running + try: + os.kill(lock_pid, 0) + except OSError: + self.release() + return False + + # it is running + return True + + def is_locked(self, force=False): + # We aren't locked + if not self.valid_lock(): + return False + + # We are locked, but we want to force it without waiting + if not self.timeout: + if self.force: + self.release() + return False + else: + # We're not waiting or forcing the lock + raise FileLocked() + + # Locked, but want to wait for an unlock + interval = .1 + intervals = int(self.timeout / interval) + + while intervals: + if self.valid_lock(): + intervals -= 1 + time.sleep(interval) + #print('stopping %s' % intervals) + else: + return True + + # check one last time + if self.valid_lock(): + if self.force: + self.release() + else: + # still locked :( + raise FileLocked() + + def acquire(self): + """Create a pid filename and create a symlink (the actual lock file) + across platforms that points to it. Symlink is used because it's an + atomic operation across platforms. + """ + + pid_file = os.open(self.pid_filename, os.O_CREAT | os.O_EXCL | os.O_RDWR) + os.write(pid_file, str(os.getpid()).encode('utf-8')) + os.close(pid_file) + + if hasattr(os, 'symlink'): + os.symlink(self.pid_filename, self.lock_filename) + else: + # Windows platforms doesn't support symlinks, at least not through the os API + self.lock_filename = self.pid_filename + + + def release(self): + """Try to delete the lock files. Doesn't matter if we fail""" + if self.lock_filename != self.pid_filename: + try: + os.unlink(self.lock_filename) + except OSError: + pass + + try: + os.remove(self.pid_filename) + except OSError: + pass + + def __enter__(self): + if not self.is_locked(): + self.acquire() + return self + + def __exit__(self, type, value, traceback): + self.release() diff --git a/thesisenv/lib/python3.6/site-packages/post_office/logutils.py b/thesisenv/lib/python3.6/site-packages/post_office/logutils.py new file mode 100644 index 0000000..101ed3c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/logutils.py @@ -0,0 +1,37 @@ +import logging + +from .compat import dictConfig + + +# Taken from https://github.com/nvie/rq/blob/master/rq/logutils.py +def setup_loghandlers(level=None): + # Setup logging for post_office if not already configured + logger = logging.getLogger('post_office') + if not logger.handlers: + dictConfig({ + "version": 1, + "disable_existing_loggers": False, + + "formatters": { + "post_office": { + "format": "[%(levelname)s]%(asctime)s PID %(process)d: %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + }, + }, + + "handlers": { + "post_office": { + "level": "DEBUG", + "class": "logging.StreamHandler", + "formatter": "post_office" + }, + }, + + "loggers": { + "post_office": { + "handlers": ["post_office"], + "level": level or "DEBUG" + } + } + }) + return logger diff --git a/thesisenv/lib/python3.6/site-packages/post_office/mail.py b/thesisenv/lib/python3.6/site-packages/post_office/mail.py new file mode 100644 index 0000000..d70355c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/mail.py @@ -0,0 +1,305 @@ +from multiprocessing import Pool +from multiprocessing.dummy import Pool as ThreadPool + +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import 
connection as db_connection +from django.db.models import Q +from django.template import Context, Template +from django.utils.timezone import now + +from .connections import connections +from .models import Email, EmailTemplate, Log, PRIORITY, STATUS +from .settings import (get_available_backends, get_batch_size, + get_log_level, get_sending_order, get_threads_per_process) +from .utils import (get_email_template, parse_emails, parse_priority, + split_emails, create_attachments) +from .logutils import setup_loghandlers + + +logger = setup_loghandlers("INFO") + + +def create(sender, recipients=None, cc=None, bcc=None, subject='', message='', + html_message='', context=None, scheduled_time=None, headers=None, + template=None, priority=None, render_on_delivery=False, commit=True, + backend=''): + """ + Creates an email from supplied keyword arguments. If template is + specified, email subject and content will be rendered during delivery. + """ + priority = parse_priority(priority) + status = None if priority == PRIORITY.now else STATUS.queued + + if recipients is None: + recipients = [] + if cc is None: + cc = [] + if bcc is None: + bcc = [] + if context is None: + context = '' + + # If email is to be rendered during delivery, save all necessary + # information + if render_on_delivery: + email = Email( + from_email=sender, + to=recipients, + cc=cc, + bcc=bcc, + scheduled_time=scheduled_time, + headers=headers, priority=priority, status=status, + context=context, template=template, backend_alias=backend + ) + + else: + + if template: + subject = template.subject + message = template.content + html_message = template.html_content + + _context = Context(context or {}) + subject = Template(subject).render(_context) + message = Template(message).render(_context) + html_message = Template(html_message).render(_context) + + email = Email( + from_email=sender, + to=recipients, + cc=cc, + bcc=bcc, + subject=subject, + message=message, + html_message=html_message, + scheduled_time=scheduled_time, + headers=headers, priority=priority, status=status, + backend_alias=backend + ) + + if commit: + email.save() + + return email + + +def send(recipients=None, sender=None, template=None, context=None, subject='', + message='', html_message='', scheduled_time=None, headers=None, + priority=None, attachments=None, render_on_delivery=False, + log_level=None, commit=True, cc=None, bcc=None, language='', + backend=''): + + try: + recipients = parse_emails(recipients) + except ValidationError as e: + raise ValidationError('recipients: %s' % e.message) + + try: + cc = parse_emails(cc) + except ValidationError as e: + raise ValidationError('cc: %s' % e.message) + + try: + bcc = parse_emails(bcc) + except ValidationError as e: + raise ValidationError('bcc: %s' % e.message) + + if sender is None: + sender = settings.DEFAULT_FROM_EMAIL + + priority = parse_priority(priority) + + if log_level is None: + log_level = get_log_level() + + if not commit: + if priority == PRIORITY.now: + raise ValueError("send_many() can't be used with priority = 'now'") + if attachments: + raise ValueError("Can't add attachments with send_many()") + + if template: + if subject: + raise ValueError('You can\'t specify both "template" and "subject" arguments') + if message: + raise ValueError('You can\'t specify both "template" and "message" arguments') + if html_message: + raise ValueError('You can\'t specify both "template" and "html_message" arguments') + + # template can be an EmailTemplate instance or name + if isinstance(template,
EmailTemplate): + # If language is specified, ensure template uses the right language + if language: + if template.language != language: + template = template.translated_templates.get(language=language) + else: + template = get_email_template(template, language) + + if backend and backend not in get_available_backends().keys(): + raise ValueError('%s is not a valid backend alias' % backend) + + email = create(sender, recipients, cc, bcc, subject, message, html_message, + context, scheduled_time, headers, template, priority, + render_on_delivery, commit=commit, backend=backend) + + if attachments: + attachments = create_attachments(attachments) + email.attachments.add(*attachments) + + if priority == PRIORITY.now: + email.dispatch(log_level=log_level) + + return email + + +def send_many(kwargs_list): + """ + Similar to mail.send(), but this function accepts a list of kwargs. + Internally, it uses Django's bulk_create command for efficiency reasons. + Currently send_many() can't be used to send emails with priority = 'now'. + """ + emails = [] + for kwargs in kwargs_list: + emails.append(send(commit=False, **kwargs)) + Email.objects.bulk_create(emails) + + +def get_queued(): + """ + Returns a list of emails that should be sent: + - Status is queued + - Has scheduled_time earlier than the current time, or None + """ + return Email.objects.filter(status=STATUS.queued) \ + .select_related('template') \ + .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \ + .order_by(*get_sending_order()).prefetch_related('attachments')[:get_batch_size()] + + +def send_queued(processes=1, log_level=None): + """ + Sends out all queued mails whose scheduled_time is earlier than now, or None + """ + queued_emails = get_queued() + total_sent, total_failed = 0, 0 + total_email = len(queued_emails) + + logger.info('Started sending %s emails with %s processes.'
% + (total_email, processes)) + + if log_level is None: + log_level = get_log_level() + + if queued_emails: + + # Don't use more processes than number of emails + if total_email < processes: + processes = total_email + + if processes == 1: + total_sent, total_failed = _send_bulk(queued_emails, + uses_multiprocessing=False, + log_level=log_level) + else: + email_lists = split_emails(queued_emails, processes) + + pool = Pool(processes) + results = pool.map(_send_bulk, email_lists) + pool.terminate() + + total_sent = sum([result[0] for result in results]) + total_failed = sum([result[1] for result in results]) + message = '%s emails attempted, %s sent, %s failed' % ( + total_email, + total_sent, + total_failed + ) + logger.info(message) + return (total_sent, total_failed) + + +def _send_bulk(emails, uses_multiprocessing=True, log_level=None): + # Multiprocessing does not play well with database connection + # Fix: Close connections on forking process + # https://groups.google.com/forum/#!topic/django-users/eCAIY9DAfG0 + if uses_multiprocessing: + db_connection.close() + + if log_level is None: + log_level = get_log_level() + + sent_emails = [] + failed_emails = [] # This is a list of two tuples (email, exception) + email_count = len(emails) + + logger.info('Process started, sending %s emails' % email_count) + + def send(email): + try: + email.dispatch(log_level=log_level, commit=False, + disconnect_after_delivery=False) + sent_emails.append(email) + logger.debug('Successfully sent email #%d' % email.id) + except Exception as e: + logger.debug('Failed to send email #%d' % email.id) + failed_emails.append((email, e)) + + # Prepare emails before we send these to threads for sending + # So we don't need to access the DB from within threads + for email in emails: + # Sometimes this can fail, for example when trying to render + # email from a faulty Django template + try: + email.prepare_email_message() + except Exception as e: + failed_emails.append((email, e)) + + number_of_threads = min(get_threads_per_process(), email_count) + pool = ThreadPool(number_of_threads) + + pool.map(send, emails) + pool.close() + pool.join() + + connections.close() + + # Update statuses of sent and failed emails + email_ids = [email.id for email in sent_emails] + Email.objects.filter(id__in=email_ids).update(status=STATUS.sent) + + email_ids = [email.id for (email, e) in failed_emails] + Email.objects.filter(id__in=email_ids).update(status=STATUS.failed) + + # If log level is 0, log nothing, 1 logs only sending failures + # and 2 means log both successes and failures + if log_level >= 1: + + logs = [] + for (email, exception) in failed_emails: + logs.append( + Log(email=email, status=STATUS.failed, + message=str(exception), + exception_type=type(exception).__name__) + ) + + if logs: + Log.objects.bulk_create(logs) + + if log_level == 2: + + logs = [] + for email in sent_emails: + logs.append(Log(email=email, status=STATUS.sent)) + + if logs: + Log.objects.bulk_create(logs) + + logger.info( + 'Process finished, %s attempted, %s sent, %s failed' % ( + email_count, len(sent_emails), len(failed_emails) + ) + ) + + return len(sent_emails), len(failed_emails) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py b/thesisenv/lib/python3.6/site-packages/post_office/management/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py rename to thesisenv/lib/python3.6/site-packages/post_office/management/__init__.py diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/post_office/management/commands/__init__.py
similarity index 100%
rename from thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py
rename to thesisenv/lib/python3.6/site-packages/post_office/management/commands/__init__.py
diff --git a/thesisenv/lib/python3.6/site-packages/post_office/management/commands/cleanup_mail.py b/thesisenv/lib/python3.6/site-packages/post_office/management/commands/cleanup_mail.py
new file mode 100644
index 0000000..5d10db9
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/management/commands/cleanup_mail.py
@@ -0,0 +1,35 @@
+import datetime
+
+from django.core.management.base import BaseCommand
+from django.utils.timezone import now
+
+from ...models import Attachment, Email
+
+
+class Command(BaseCommand):
+    help = 'Delete emails older than the given number of days, optionally together with orphaned attachments.'
+
+    def add_arguments(self, parser):
+        parser.add_argument('-d', '--days',
+                            type=int, default=90,
+                            help="Cleanup mails older than this many days, defaults to 90.")
+
+        parser.add_argument('-da', '--delete-attachments', action='store_true',
+                            help="Delete orphaned attachments.")
+
+    def handle(self, verbosity, days, delete_attachments, **options):
+        # Delete mails and their related logs created before the cutoff date
+
+        cutoff_date = now() - datetime.timedelta(days)
+        count = Email.objects.filter(created__lt=cutoff_date).count()
+        Email.objects.only('id').filter(created__lt=cutoff_date).delete()
+        print("Deleted {0} mails created before {1}".format(count, cutoff_date))
+
+        if delete_attachments:
+            attachments = Attachment.objects.filter(emails=None)
+            attachments_count = len(attachments)
+            for attachment in attachments:
+                # Delete the actual file
+                attachment.file.delete()
+            attachments.delete()
+            print("Deleted {0} attachments".format(attachments_count))
diff --git a/thesisenv/lib/python3.6/site-packages/post_office/management/commands/send_queued_mail.py b/thesisenv/lib/python3.6/site-packages/post_office/management/commands/send_queued_mail.py
new file mode 100644
index 0000000..ab739e9
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/management/commands/send_queued_mail.py
@@ -0,0 +1,60 @@
+import tempfile
+import sys
+
+from django.core.management.base import BaseCommand
+from django.db import connection
+from django.db.models import Q
+from django.utils.timezone import now
+
+from ...lockfile import FileLock, FileLocked
+from ...mail import send_queued
+from ...models import Email, STATUS
+from ...logutils import setup_loghandlers
+
+
+logger = setup_loghandlers()
+default_lockfile = tempfile.gettempdir() + "/post_office"
+
+
+class Command(BaseCommand):
+    def add_arguments(self, parser):
+        parser.add_argument(
+            '-p', '--processes',
+            type=int,
+            default=1,
+            help='Number of processes used to send emails',
+        )
+        parser.add_argument(
+            '-L', '--lockfile',
+            default=default_lockfile,
+            help='Absolute path of lockfile to acquire',
+        )
+        parser.add_argument(
+            '-l', '--log-level',
+            type=int,
+            help='"0" to log nothing, "1" to log only errors, "2" to log errors and successes',
+        )
+
+    def handle(self, *args, **options):
+        logger.info('Acquiring lock for sending queued emails at %s.lock' %
+                    options['lockfile'])
+        try:
+            with FileLock(options['lockfile']):
+
+                while True:
+                    try:
+                        send_queued(options['processes'],
+                                    options.get('log_level'))
+                    except Exception as e:
+                        logger.error(e, exc_info=sys.exc_info(),
+                                     extra={'status_code': 500})
+                        raise
+
+                    # Close DB 
connection to avoid multiprocessing errors + connection.close() + + if not Email.objects.filter(status=STATUS.queued) \ + .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)).exists(): + break + except FileLocked: + logger.info('Failed to acquire lock, terminating now.') \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0001_initial.py new file mode 100644 index 0000000..a9c6270 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0001_initial.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations +import jsonfield.fields +import post_office.fields +import post_office.validators +import post_office.models + + +class Migration(migrations.Migration): + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Attachment', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('file', models.FileField(upload_to=post_office.models.get_upload_path)), + ('name', models.CharField(help_text='The original filename', max_length=255)), + ], + options={ + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='Email', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('from_email', models.CharField(max_length=254, validators=[post_office.validators.validate_email_with_name])), + ('to', post_office.fields.CommaSeparatedEmailField(blank=True)), + ('cc', post_office.fields.CommaSeparatedEmailField(blank=True)), + ('bcc', post_office.fields.CommaSeparatedEmailField(blank=True)), + ('subject', models.CharField(max_length=255, blank=True)), + ('message', models.TextField(blank=True)), + ('html_message', models.TextField(blank=True)), + ('status', models.PositiveSmallIntegerField(blank=True, null=True, db_index=True, choices=[(0, 'sent'), (1, 'failed'), (2, 'queued')])), + ('priority', models.PositiveSmallIntegerField(blank=True, null=True, choices=[(0, 'low'), (1, 'medium'), (2, 'high'), (3, 'now')])), + ('created', models.DateTimeField(auto_now_add=True, db_index=True)), + ('last_updated', models.DateTimeField(auto_now=True, db_index=True)), + ('scheduled_time', models.DateTimeField(db_index=True, null=True, blank=True)), + ('headers', jsonfield.fields.JSONField(null=True, blank=True)), + ('context', jsonfield.fields.JSONField(null=True, blank=True)), + ], + options={ + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='EmailTemplate', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('name', models.CharField(help_text=b"e.g: 'welcome_email'", max_length=255)), + ('description', models.TextField(help_text='Description of this template.', blank=True)), + ('subject', models.CharField(blank=True, max_length=255, validators=[post_office.validators.validate_template_syntax])), + ('content', models.TextField(blank=True, validators=[post_office.validators.validate_template_syntax])), + ('html_content', models.TextField(blank=True, validators=[post_office.validators.validate_template_syntax])), + ('created', models.DateTimeField(auto_now_add=True)), + ('last_updated', models.DateTimeField(auto_now=True)), + ], + options={ + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='Log', + fields=[ + ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('date', models.DateTimeField(auto_now_add=True)), + ('status', models.PositiveSmallIntegerField(choices=[(0, 'sent'), (1, 'failed')])), + ('exception_type', models.CharField(max_length=255, blank=True)), + ('message', models.TextField()), + ('email', models.ForeignKey(related_name='logs', editable=False, on_delete=models.deletion.CASCADE, to='post_office.Email', )), + ], + options={ + }, + bases=(models.Model,), + ), + migrations.AddField( + model_name='email', + name='template', + field=models.ForeignKey(blank=True, on_delete=models.deletion.SET_NULL, to='post_office.EmailTemplate', null=True), + preserve_default=True, + ), + migrations.AddField( + model_name='attachment', + name='emails', + field=models.ManyToManyField(related_name='attachments', to='post_office.Email'), + preserve_default=True, + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0002_add_i18n_and_backend_alias.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0002_add_i18n_and_backend_alias.py new file mode 100644 index 0000000..c98f9bc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0002_add_i18n_and_backend_alias.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations +import post_office.validators +import post_office.fields + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0001_initial'), + ] + + operations = [ + migrations.AlterModelOptions( + name='emailtemplate', + options={'verbose_name': 'Email Template', 'verbose_name_plural': 'Email Templates'}, + ), + migrations.AddField( + model_name='email', + name='backend_alias', + field=models.CharField(default='', max_length=64, blank=True), + ), + migrations.AddField( + model_name='emailtemplate', + name='default_template', + field=models.ForeignKey(related_name='translated_templates', default=None, to='post_office.EmailTemplate', null=True, on_delete=models.deletion.SET_NULL), + ), + migrations.AddField( + model_name='emailtemplate', + name='language', + field=models.CharField(default='', help_text='Render template in alternative language', max_length=12, blank=True, choices=[(b'af', b'Afrikaans'), (b'ar', b'Arabic'), (b'ast', b'Asturian'), (b'az', b'Azerbaijani'), (b'bg', b'Bulgarian'), (b'be', b'Belarusian'), (b'bn', b'Bengali'), (b'br', b'Breton'), (b'bs', b'Bosnian'), (b'ca', b'Catalan'), (b'cs', b'Czech'), (b'cy', b'Welsh'), (b'da', b'Danish'), (b'de', b'German'), (b'el', b'Greek'), (b'en', b'English'), (b'en-au', b'Australian English'), (b'en-gb', b'British English'), (b'eo', b'Esperanto'), (b'es', b'Spanish'), (b'es-ar', b'Argentinian Spanish'), (b'es-mx', b'Mexican Spanish'), (b'es-ni', b'Nicaraguan Spanish'), (b'es-ve', b'Venezuelan Spanish'), (b'et', b'Estonian'), (b'eu', b'Basque'), (b'fa', b'Persian'), (b'fi', b'Finnish'), (b'fr', b'French'), (b'fy', b'Frisian'), (b'ga', b'Irish'), (b'gl', b'Galician'), (b'he', b'Hebrew'), (b'hi', b'Hindi'), (b'hr', b'Croatian'), (b'hu', b'Hungarian'), (b'ia', b'Interlingua'), (b'id', b'Indonesian'), (b'io', b'Ido'), (b'is', b'Icelandic'), (b'it', b'Italian'), (b'ja', b'Japanese'), (b'ka', b'Georgian'), (b'kk', b'Kazakh'), (b'km', b'Khmer'), (b'kn', b'Kannada'), (b'ko', b'Korean'), (b'lb', b'Luxembourgish'), (b'lt', b'Lithuanian'), (b'lv', b'Latvian'), (b'mk', b'Macedonian'), (b'ml', b'Malayalam'), (b'mn', b'Mongolian'), (b'mr', b'Marathi'), 
(b'my', b'Burmese'), (b'nb', b'Norwegian Bokmal'), (b'ne', b'Nepali'), (b'nl', b'Dutch'), (b'nn', b'Norwegian Nynorsk'), (b'os', b'Ossetic'), (b'pa', b'Punjabi'), (b'pl', b'Polish'), (b'pt', b'Portuguese'), (b'pt-br', b'Brazilian Portuguese'), (b'ro', b'Romanian'), (b'ru', b'Russian'), (b'sk', b'Slovak'), (b'sl', b'Slovenian'), (b'sq', b'Albanian'), (b'sr', b'Serbian'), (b'sr-latn', b'Serbian Latin'), (b'sv', b'Swedish'), (b'sw', b'Swahili'), (b'ta', b'Tamil'), (b'te', b'Telugu'), (b'th', b'Thai'), (b'tr', b'Turkish'), (b'tt', b'Tatar'), (b'udm', b'Udmurt'), (b'uk', b'Ukrainian'), (b'ur', b'Urdu'), (b'vi', b'Vietnamese'), (b'zh-cn', b'Simplified Chinese'), (b'zh-hans', b'Simplified Chinese'), (b'zh-hant', b'Traditional Chinese'), (b'zh-tw', b'Traditional Chinese')]), + ), + migrations.AlterField( + model_name='email', + name='bcc', + field=post_office.fields.CommaSeparatedEmailField(verbose_name='Bcc', blank=True), + ), + migrations.AlterField( + model_name='email', + name='cc', + field=post_office.fields.CommaSeparatedEmailField(verbose_name='Cc', blank=True), + ), + migrations.AlterField( + model_name='email', + name='from_email', + field=models.CharField(max_length=254, verbose_name='Email From', validators=[post_office.validators.validate_email_with_name]), + ), + migrations.AlterField( + model_name='email', + name='html_message', + field=models.TextField(verbose_name='HTML Message', blank=True), + ), + migrations.AlterField( + model_name='email', + name='message', + field=models.TextField(verbose_name='Message', blank=True), + ), + migrations.AlterField( + model_name='email', + name='subject', + field=models.CharField(max_length=255, verbose_name='Subject', blank=True), + ), + migrations.AlterField( + model_name='email', + name='to', + field=post_office.fields.CommaSeparatedEmailField(verbose_name='Email To', blank=True), + ), + migrations.AlterField( + model_name='emailtemplate', + name='content', + field=models.TextField(blank=True, verbose_name='Content', validators=[post_office.validators.validate_template_syntax]), + ), + migrations.AlterField( + model_name='emailtemplate', + name='html_content', + field=models.TextField(blank=True, verbose_name='HTML content', validators=[post_office.validators.validate_template_syntax]), + ), + migrations.AlterField( + model_name='emailtemplate', + name='subject', + field=models.CharField(blank=True, max_length=255, verbose_name='Subject', validators=[post_office.validators.validate_template_syntax]), + ), + migrations.AlterUniqueTogether( + name='emailtemplate', + unique_together=set([('language', 'default_template')]), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0003_longer_subject.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0003_longer_subject.py new file mode 100644 index 0000000..8bc645f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0003_longer_subject.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9 on 2016-02-04 08:08 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0002_add_i18n_and_backend_alias'), + ] + + operations = [ + migrations.AlterField( + model_name='email', + name='subject', + field=models.CharField(blank=True, max_length=989, verbose_name='Subject'), + ), + migrations.AlterField( + model_name='emailtemplate', + name='language', + field=models.CharField(blank=True, choices=[(b'af', 
b'Afrikaans'), (b'ar', b'Arabic'), (b'ast', b'Asturian'), (b'az', b'Azerbaijani'), (b'bg', b'Bulgarian'), (b'be', b'Belarusian'), (b'bn', b'Bengali'), (b'br', b'Breton'), (b'bs', b'Bosnian'), (b'ca', b'Catalan'), (b'cs', b'Czech'), (b'cy', b'Welsh'), (b'da', b'Danish'), (b'de', b'German'), (b'el', b'Greek'), (b'en', b'English'), (b'en-au', b'Australian English'), (b'en-gb', b'British English'), (b'eo', b'Esperanto'), (b'es', b'Spanish'), (b'es-ar', b'Argentinian Spanish'), (b'es-co', b'Colombian Spanish'), (b'es-mx', b'Mexican Spanish'), (b'es-ni', b'Nicaraguan Spanish'), (b'es-ve', b'Venezuelan Spanish'), (b'et', b'Estonian'), (b'eu', b'Basque'), (b'fa', b'Persian'), (b'fi', b'Finnish'), (b'fr', b'French'), (b'fy', b'Frisian'), (b'ga', b'Irish'), (b'gd', b'Scottish Gaelic'), (b'gl', b'Galician'), (b'he', b'Hebrew'), (b'hi', b'Hindi'), (b'hr', b'Croatian'), (b'hu', b'Hungarian'), (b'ia', b'Interlingua'), (b'id', b'Indonesian'), (b'io', b'Ido'), (b'is', b'Icelandic'), (b'it', b'Italian'), (b'ja', b'Japanese'), (b'ka', b'Georgian'), (b'kk', b'Kazakh'), (b'km', b'Khmer'), (b'kn', b'Kannada'), (b'ko', b'Korean'), (b'lb', b'Luxembourgish'), (b'lt', b'Lithuanian'), (b'lv', b'Latvian'), (b'mk', b'Macedonian'), (b'ml', b'Malayalam'), (b'mn', b'Mongolian'), (b'mr', b'Marathi'), (b'my', b'Burmese'), (b'nb', b'Norwegian Bokmal'), (b'ne', b'Nepali'), (b'nl', b'Dutch'), (b'nn', b'Norwegian Nynorsk'), (b'os', b'Ossetic'), (b'pa', b'Punjabi'), (b'pl', b'Polish'), (b'pt', b'Portuguese'), (b'pt-br', b'Brazilian Portuguese'), (b'ro', b'Romanian'), (b'ru', b'Russian'), (b'sk', b'Slovak'), (b'sl', b'Slovenian'), (b'sq', b'Albanian'), (b'sr', b'Serbian'), (b'sr-latn', b'Serbian Latin'), (b'sv', b'Swedish'), (b'sw', b'Swahili'), (b'ta', b'Tamil'), (b'te', b'Telugu'), (b'th', b'Thai'), (b'tr', b'Turkish'), (b'tt', b'Tatar'), (b'udm', b'Udmurt'), (b'uk', b'Ukrainian'), (b'ur', b'Urdu'), (b'vi', b'Vietnamese'), (b'zh-hans', b'Simplified Chinese'), (b'zh-hant', b'Traditional Chinese')], default='', help_text='Render template in alternative language', max_length=12), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0004_auto_20160607_0901.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0004_auto_20160607_0901.py new file mode 100644 index 0000000..b749054 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0004_auto_20160607_0901.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.6 on 2016-06-07 07:01 +from __future__ import unicode_literals + +from django.db import migrations, models +import django.db.models.deletion +import jsonfield.fields +import post_office.models + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0003_longer_subject'), + ] + + operations = [ + migrations.AlterModelOptions( + name='attachment', + options={'verbose_name': 'Attachment', 'verbose_name_plural': 'Attachments'}, + ), + migrations.AlterModelOptions( + name='email', + options={'verbose_name': 'Email', 'verbose_name_plural': 'Emails'}, + ), + migrations.AlterModelOptions( + name='log', + options={'verbose_name': 'Log', 'verbose_name_plural': 'Logs'}, + ), + migrations.AlterField( + model_name='attachment', + name='emails', + field=models.ManyToManyField(related_name='attachments', to='post_office.Email', verbose_name='Email addresses'), + ), + migrations.AlterField( + model_name='attachment', + name='file', + field=models.FileField(upload_to=post_office.models.get_upload_path, verbose_name='File'), + ), 
+ migrations.AlterField( + model_name='attachment', + name='name', + field=models.CharField(help_text='The original filename', max_length=255, verbose_name='Name'), + ), + migrations.AlterField( + model_name='email', + name='backend_alias', + field=models.CharField(blank=True, default='', max_length=64, verbose_name='Backend alias'), + ), + migrations.AlterField( + model_name='email', + name='context', + field=jsonfield.fields.JSONField(blank=True, null=True, verbose_name='Context'), + ), + migrations.AlterField( + model_name='email', + name='headers', + field=jsonfield.fields.JSONField(blank=True, null=True, verbose_name='Headers'), + ), + migrations.AlterField( + model_name='email', + name='priority', + field=models.PositiveSmallIntegerField(blank=True, choices=[(0, 'low'), (1, 'medium'), (2, 'high'), (3, 'now')], null=True, verbose_name='Priority'), + ), + migrations.AlterField( + model_name='email', + name='scheduled_time', + field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name='The scheduled sending time'), + ), + migrations.AlterField( + model_name='email', + name='status', + field=models.PositiveSmallIntegerField(blank=True, choices=[(0, 'sent'), (1, 'failed'), (2, 'queued')], db_index=True, null=True, verbose_name='Status'), + ), + migrations.AlterField( + model_name='email', + name='template', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='post_office.EmailTemplate', verbose_name='Email template'), + ), + migrations.AlterField( + model_name='emailtemplate', + name='default_template', + field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translated_templates', to='post_office.EmailTemplate', verbose_name='Default template'), + ), + migrations.AlterField( + model_name='emailtemplate', + name='description', + field=models.TextField(blank=True, help_text='Description of this template.', verbose_name='Description'), + ), + migrations.AlterField( + model_name='emailtemplate', + name='language', + field=models.CharField(blank=True, default='', help_text='Render template in alternative language', max_length=12, verbose_name='Language'), + ), + migrations.AlterField( + model_name='emailtemplate', + name='name', + field=models.CharField(help_text="e.g: 'welcome_email'", max_length=255, verbose_name='Name'), + ), + migrations.AlterField( + model_name='log', + name='email', + field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='post_office.Email', verbose_name='Email address'), + ), + migrations.AlterField( + model_name='log', + name='exception_type', + field=models.CharField(blank=True, max_length=255, verbose_name='Exception type'), + ), + migrations.AlterField( + model_name='log', + name='message', + field=models.TextField(verbose_name='Message'), + ), + migrations.AlterField( + model_name='log', + name='status', + field=models.PositiveSmallIntegerField(choices=[(0, 'sent'), (1, 'failed')], verbose_name='Status'), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0005_auto_20170515_0013.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0005_auto_20170515_0013.py new file mode 100644 index 0000000..842b2e0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0005_auto_20170515_0013.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.1 on 2017-05-15 00:13 +from __future__ import unicode_literals + +from 
django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0004_auto_20160607_0901'), + ] + + operations = [ + migrations.AlterUniqueTogether( + name='emailtemplate', + unique_together=set([('name', 'language', 'default_template')]), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0006_attachment_mimetype.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0006_attachment_mimetype.py new file mode 100644 index 0000000..189f08f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0006_attachment_mimetype.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0005_auto_20170515_0013'), + ] + + operations = [ + migrations.AddField( + model_name='attachment', + name='mimetype', + field=models.CharField(default='', max_length=255, blank=True), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/migrations/0007_auto_20170731_1342.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0007_auto_20170731_1342.py new file mode 100644 index 0000000..2745d99 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/migrations/0007_auto_20170731_1342.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.3 on 2017-07-31 11:42 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('post_office', '0006_attachment_mimetype'), + ] + + operations = [ + migrations.AlterModelOptions( + name='emailtemplate', + options={'ordering': ['name'], 'verbose_name': 'Email Template', 'verbose_name_plural': 'Email Templates'}, + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py b/thesisenv/lib/python3.6/site-packages/post_office/migrations/__init__.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py rename to thesisenv/lib/python3.6/site-packages/post_office/migrations/__init__.py diff --git a/thesisenv/lib/python3.6/site-packages/post_office/models.py b/thesisenv/lib/python3.6/site-packages/post_office/models.py new file mode 100644 index 0000000..b074343 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/models.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import os + +from collections import namedtuple +from uuid import uuid4 + +from django.core.mail import EmailMessage, EmailMultiAlternatives +from django.db import models +from django.template import Context, Template +from django.utils.encoding import python_2_unicode_compatible +from django.utils.translation import pgettext_lazy +from django.utils.translation import ugettext_lazy as _ +from django.utils import timezone +from jsonfield import JSONField + +from post_office import cache +from post_office.fields import CommaSeparatedEmailField + +from .compat import text_type, smart_text +from .connections import connections +from .settings import context_field_class, get_log_level +from .validators import validate_email_with_name, validate_template_syntax + + +PRIORITY = namedtuple('PRIORITY', 'low medium high now')._make(range(4)) +STATUS = namedtuple('STATUS', 'sent failed queued')._make(range(3)) + + +@python_2_unicode_compatible +class Email(models.Model): + """ + A 
model to hold email information. + """ + + PRIORITY_CHOICES = [(PRIORITY.low, _("low")), (PRIORITY.medium, _("medium")), + (PRIORITY.high, _("high")), (PRIORITY.now, _("now"))] + STATUS_CHOICES = [(STATUS.sent, _("sent")), (STATUS.failed, _("failed")), + (STATUS.queued, _("queued"))] + + from_email = models.CharField(_("Email From"), max_length=254, + validators=[validate_email_with_name]) + to = CommaSeparatedEmailField(_("Email To")) + cc = CommaSeparatedEmailField(_("Cc")) + bcc = CommaSeparatedEmailField(_("Bcc")) + subject = models.CharField(_("Subject"), max_length=989, blank=True) + message = models.TextField(_("Message"), blank=True) + html_message = models.TextField(_("HTML Message"), blank=True) + """ + Emails with 'queued' status will get processed by ``send_queued`` command. + Status field will then be set to ``failed`` or ``sent`` depending on + whether it's successfully delivered. + """ + status = models.PositiveSmallIntegerField( + _("Status"), + choices=STATUS_CHOICES, db_index=True, + blank=True, null=True) + priority = models.PositiveSmallIntegerField(_("Priority"), + choices=PRIORITY_CHOICES, + blank=True, null=True) + created = models.DateTimeField(auto_now_add=True, db_index=True) + last_updated = models.DateTimeField(db_index=True, auto_now=True) + scheduled_time = models.DateTimeField(_('The scheduled sending time'), + blank=True, null=True, db_index=True) + headers = JSONField(_('Headers'), blank=True, null=True) + template = models.ForeignKey('post_office.EmailTemplate', blank=True, + null=True, verbose_name=_('Email template'), + on_delete=models.CASCADE) + context = context_field_class(_('Context'), blank=True, null=True) + backend_alias = models.CharField(_('Backend alias'), blank=True, default='', + max_length=64) + + class Meta: + app_label = 'post_office' + verbose_name = pgettext_lazy("Email address", "Email") + verbose_name_plural = pgettext_lazy("Email addresses", "Emails") + + def __init__(self, *args, **kwargs): + super(Email, self).__init__(*args, **kwargs) + self._cached_email_message = None + + def __str__(self): + return u'%s' % self.to + + def email_message(self): + """ + Returns Django EmailMessage object for sending. + """ + if self._cached_email_message: + return self._cached_email_message + + return self.prepare_email_message() + + def prepare_email_message(self): + """ + Returns a django ``EmailMessage`` or ``EmailMultiAlternatives`` object, + depending on whether html_message is empty. 
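+        The template, if set, is rendered against ``context``; the result
+        is cached on the instance for reuse by ``email_message()``.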
+ """ + subject = smart_text(self.subject) + + if self.template is not None: + _context = Context(self.context) + subject = Template(self.template.subject).render(_context) + message = Template(self.template.content).render(_context) + html_message = Template(self.template.html_content).render(_context) + + else: + subject = self.subject + message = self.message + html_message = self.html_message + + connection = connections[self.backend_alias or 'default'] + + if html_message: + msg = EmailMultiAlternatives( + subject=subject, body=message, from_email=self.from_email, + to=self.to, bcc=self.bcc, cc=self.cc, + headers=self.headers, connection=connection) + msg.attach_alternative(html_message, "text/html") + else: + msg = EmailMessage( + subject=subject, body=message, from_email=self.from_email, + to=self.to, bcc=self.bcc, cc=self.cc, + headers=self.headers, connection=connection) + + for attachment in self.attachments.all(): + msg.attach(attachment.name, attachment.file.read(), mimetype=attachment.mimetype or None) + attachment.file.close() + + self._cached_email_message = msg + return msg + + def dispatch(self, log_level=None, + disconnect_after_delivery=True, commit=True): + """ + Sends email and log the result. + """ + try: + self.email_message().send() + status = STATUS.sent + message = '' + exception_type = '' + except Exception as e: + status = STATUS.failed + message = str(e) + exception_type = type(e).__name__ + + # If run in a bulk sending mode, reraise and let the outer + # layer handle the exception + if not commit: + raise + + if commit: + self.status = status + self.save(update_fields=['status']) + + if log_level is None: + log_level = get_log_level() + + # If log level is 0, log nothing, 1 logs only sending failures + # and 2 means log both successes and failures + if log_level == 1: + if status == STATUS.failed: + self.logs.create(status=status, message=message, + exception_type=exception_type) + elif log_level == 2: + self.logs.create(status=status, message=message, + exception_type=exception_type) + + return status + + def save(self, *args, **kwargs): + self.full_clean() + return super(Email, self).save(*args, **kwargs) + + +@python_2_unicode_compatible +class Log(models.Model): + """ + A model to record sending email sending activities. 
+ """ + + STATUS_CHOICES = [(STATUS.sent, _("sent")), (STATUS.failed, _("failed"))] + + email = models.ForeignKey(Email, editable=False, related_name='logs', + verbose_name=_('Email address'), on_delete=models.CASCADE) + date = models.DateTimeField(auto_now_add=True) + status = models.PositiveSmallIntegerField(_('Status'), choices=STATUS_CHOICES) + exception_type = models.CharField(_('Exception type'), max_length=255, blank=True) + message = models.TextField(_('Message')) + + class Meta: + app_label = 'post_office' + verbose_name = _("Log") + verbose_name_plural = _("Logs") + + def __str__(self): + return text_type(self.date) + + +class EmailTemplateManager(models.Manager): + def get_by_natural_key(self, name, language, default_template): + return self.get(name=name, language=language, default_template=default_template) + + +@python_2_unicode_compatible +class EmailTemplate(models.Model): + """ + Model to hold template information from db + """ + name = models.CharField(_('Name'), max_length=255, help_text=_("e.g: 'welcome_email'")) + description = models.TextField(_('Description'), blank=True, + help_text=_("Description of this template.")) + created = models.DateTimeField(auto_now_add=True) + last_updated = models.DateTimeField(auto_now=True) + subject = models.CharField(max_length=255, blank=True, + verbose_name=_("Subject"), validators=[validate_template_syntax]) + content = models.TextField(blank=True, + verbose_name=_("Content"), validators=[validate_template_syntax]) + html_content = models.TextField(blank=True, + verbose_name=_("HTML content"), validators=[validate_template_syntax]) + language = models.CharField(max_length=12, + verbose_name=_("Language"), + help_text=_("Render template in alternative language"), + default='', blank=True) + default_template = models.ForeignKey('self', related_name='translated_templates', + null=True, default=None, verbose_name=_('Default template'), on_delete=models.CASCADE) + + objects = EmailTemplateManager() + + class Meta: + app_label = 'post_office' + unique_together = ('name', 'language', 'default_template') + verbose_name = _("Email Template") + verbose_name_plural = _("Email Templates") + ordering = ['name'] + + def __str__(self): + return u'%s %s' % (self.name, self.language) + + def natural_key(self): + return (self.name, self.language, self.default_template) + + def save(self, *args, **kwargs): + # If template is a translation, use default template's name + if self.default_template and not self.name: + self.name = self.default_template.name + + template = super(EmailTemplate, self).save(*args, **kwargs) + cache.delete(self.name) + return template + + +def get_upload_path(instance, filename): + """Overriding to store the original filename""" + if not instance.name: + instance.name = filename # set original filename + date = timezone.now().date() + filename = '{name}.{ext}'.format(name=uuid4().hex, + ext=filename.split('.')[-1]) + + return os.path.join('post_office_attachments', str(date.year), + str(date.month), str(date.day), filename) + + +@python_2_unicode_compatible +class Attachment(models.Model): + """ + A model describing an email attachment. 
+ """ + file = models.FileField(_('File'), upload_to=get_upload_path) + name = models.CharField(_('Name'), max_length=255, help_text=_("The original filename")) + emails = models.ManyToManyField(Email, related_name='attachments', + verbose_name=_('Email addresses')) + mimetype = models.CharField(max_length=255, default='', blank=True) + + class Meta: + app_label = 'post_office' + verbose_name = _("Attachment") + verbose_name_plural = _("Attachments") + + def __str__(self): + return self.name diff --git a/thesisenv/lib/python3.6/site-packages/post_office/settings.py b/thesisenv/lib/python3.6/site-packages/post_office/settings.py new file mode 100644 index 0000000..5d61ac0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/settings.py @@ -0,0 +1,95 @@ +import warnings + +from django.conf import settings +from django.core.cache.backends.base import InvalidCacheBackendError + +from .compat import import_attribute, get_cache + + +def get_backend(alias='default'): + return get_available_backends()[alias] + + +def get_available_backends(): + """ Returns a dictionary of defined backend classes. For example: + { + 'default': 'django.core.mail.backends.smtp.EmailBackend', + 'locmem': 'django.core.mail.backends.locmem.EmailBackend', + } + """ + backends = get_config().get('BACKENDS', {}) + + if backends: + return backends + + # Try to get backend settings from old style + # POST_OFFICE = { + # 'EMAIL_BACKEND': 'mybackend' + # } + backend = get_config().get('EMAIL_BACKEND') + if backend: + warnings.warn('Please use the new POST_OFFICE["BACKENDS"] settings', + DeprecationWarning) + + backends['default'] = backend + return backends + + # Fall back to Django's EMAIL_BACKEND definition + backends['default'] = getattr( + settings, 'EMAIL_BACKEND', + 'django.core.mail.backends.smtp.EmailBackend') + + # If EMAIL_BACKEND is set to use PostOfficeBackend + # and POST_OFFICE_BACKEND is not set, fall back to SMTP + if 'post_office.EmailBackend' in backends['default']: + backends['default'] = 'django.core.mail.backends.smtp.EmailBackend' + + return backends + + +def get_cache_backend(): + if hasattr(settings, 'CACHES'): + if "post_office" in settings.CACHES: + return get_cache("post_office") + else: + # Sometimes this raises InvalidCacheBackendError, which is ok too + try: + return get_cache("default") + except InvalidCacheBackendError: + pass + return None + + +def get_config(): + """ + Returns Post Office's configuration in dictionary format. 
e.g: + POST_OFFICE = { + 'BATCH_SIZE': 1000 + } + """ + return getattr(settings, 'POST_OFFICE', {}) + + +def get_batch_size(): + return get_config().get('BATCH_SIZE', 100) + + +def get_threads_per_process(): + return get_config().get('THREADS_PER_PROCESS', 5) + + +def get_default_priority(): + return get_config().get('DEFAULT_PRIORITY', 'medium') + + +def get_log_level(): + return get_config().get('LOG_LEVEL', 2) + + +def get_sending_order(): + return get_config().get('SENDING_ORDER', ['-priority']) + + +CONTEXT_FIELD_CLASS = get_config().get('CONTEXT_FIELD_CLASS', + 'jsonfield.JSONField') +context_field_class = import_attribute(CONTEXT_FIELD_CLASS) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/test_settings.py b/thesisenv/lib/python3.6/site-packages/post_office/test_settings.py new file mode 100644 index 0000000..63c28f7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/test_settings.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + + +import django +from distutils.version import StrictVersion + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + }, +} + +# Default values: True +# POST_OFFICE_CACHE = True +# POST_OFFICE_TEMPLATE_CACHE = True + + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'TIMEOUT': 36000, + 'KEY_PREFIX': 'post-office', + }, + 'post_office': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'TIMEOUT': 36000, + 'KEY_PREFIX': 'post-office', + } +} + +POST_OFFICE = { + 'BACKENDS': { + 'default': 'django.core.mail.backends.dummy.EmailBackend', + 'locmem': 'django.core.mail.backends.locmem.EmailBackend', + 'error': 'post_office.tests.test_backends.ErrorRaisingBackend', + 'smtp': 'django.core.mail.backends.smtp.EmailBackend', + 'connection_tester': 'post_office.tests.test_mail.ConnectionTestingBackend', + } +} + + +INSTALLED_APPS = ( + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'post_office', +) + +SECRET_KEY = 'a' + +ROOT_URLCONF = 'post_office.test_urls' + +DEFAULT_FROM_EMAIL = 'webmaster@example.com' + +if StrictVersion(str(django.get_version())) < '1.10': + MIDDLEWARE_CLASSES = ( + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + ) +else: + MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + ] + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.contrib.auth.context_processors.auth', + 'django.template.context_processors.debug', + 'django.template.context_processors.i18n', + 'django.template.context_processors.media', + 'django.template.context_processors.static', + 'django.template.context_processors.tz', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/test_urls.py b/thesisenv/lib/python3.6/site-packages/post_office/test_urls.py new file mode 100644 index 0000000..ede2ec9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/test_urls.py @@ -0,0 +1,6 @@ +from django.conf.urls import url +from django.contrib import admin + +urlpatterns = [ + url(r'^admin/', admin.site.urls), +] diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/__init__.py 
b/thesisenv/lib/python3.6/site-packages/post_office/tests/__init__.py
new file mode 100644
index 0000000..f682b8b
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/__init__.py
@@ -0,0 +1,8 @@
+from .test_backends import BackendTest
+from .test_commands import CommandTest
+from .test_lockfile import LockTest
+from .test_mail import MailTest
+from .test_models import ModelTest
+from .test_utils import UtilsTest
+from .test_cache import CacheTest
+from .test_views import AdminViewTest
diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_backends.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_backends.py
new file mode 100644
index 0000000..3b122d5
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_backends.py
@@ -0,0 +1,113 @@
+from django.conf import settings
+from django.core.mail import EmailMultiAlternatives, send_mail, EmailMessage
+from django.core.mail.backends.base import BaseEmailBackend
+from django.test import TestCase
+from django.test.utils import override_settings
+
+from ..models import Email, STATUS, PRIORITY
+from ..settings import get_backend
+
+
+class ErrorRaisingBackend(BaseEmailBackend):
+    """
+    An EmailBackend that always raises an error during sending,
+    to test whether post_office handles sending errors correctly
+    """
+
+    def send_messages(self, email_messages):
+        raise Exception('Fake Error')
+
+
+class BackendTest(TestCase):
+
+    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
+    def test_email_backend(self):
+        """
+        Ensure that the email backend properly queues email messages.
+        """
+        send_mail('Test', 'Message', 'from@example.com', ['to@example.com'])
+        email = Email.objects.latest('id')
+        self.assertEqual(email.subject, 'Test')
+        self.assertEqual(email.status, STATUS.queued)
+        self.assertEqual(email.priority, PRIORITY.medium)
+
+    def test_email_backend_setting(self):
+        """
+        Ensure that the backend setting is resolved with sensible fallbacks.
+        """
+        old_email_backend = getattr(settings, 'EMAIL_BACKEND', None)
+        old_post_office_backend = getattr(settings, 'POST_OFFICE_BACKEND', None)
+        if hasattr(settings, 'EMAIL_BACKEND'):
+            delattr(settings, 'EMAIL_BACKEND')
+        if hasattr(settings, 'POST_OFFICE_BACKEND'):
+            delattr(settings, 'POST_OFFICE_BACKEND')
+
+        previous_settings = settings.POST_OFFICE
+        delattr(settings, 'POST_OFFICE')
+        # If no email backend is set, backend should default to SMTP
+        self.assertEqual(get_backend(), 'django.core.mail.backends.smtp.EmailBackend')
+
+        # If EMAIL_BACKEND is set to PostOfficeBackend, use SMTP to send by default
+        setattr(settings, 'EMAIL_BACKEND', 'post_office.EmailBackend')
+        self.assertEqual(get_backend(), 'django.core.mail.backends.smtp.EmailBackend')
+
+        # If EMAIL_BACKEND is set on new dictionary-styled settings, use that
+        setattr(settings, 'POST_OFFICE', {'EMAIL_BACKEND': 'test'})
+        self.assertEqual(get_backend(), 'test')
+        delattr(settings, 'POST_OFFICE')
+
+        if old_email_backend:
+            setattr(settings, 'EMAIL_BACKEND', old_email_backend)
+        else:
+            delattr(settings, 'EMAIL_BACKEND')
+        setattr(settings, 'POST_OFFICE', previous_settings)
+
+    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
+    def test_sending_html_email(self):
+        """
+        "text/html" alternatives should be persisted into the database
+        """
+        message = EmailMultiAlternatives('subject', 'body', 'from@example.com',
+                                         ['recipient@example.com'])
+        message.attach_alternative('html', "text/html")
+        message.send()
+        email = Email.objects.latest('id')
+        self.assertEqual(email.html_message, 'html')
+
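+    # Editor's note: an illustrative sketch, not part of the upstream
+    # post_office sources, showing the queue-then-flush flow these tests
+    # exercise (assumes post_office is installed and migrated):
+    #
+    #   from django.core.mail import send_mail
+    #   from django.core.management import call_command
+    #
+    #   # With EMAIL_BACKEND = 'post_office.EmailBackend' this only queues:
+    #   send_mail('Hi', 'Body', 'from@example.com', ['to@example.com'])
+    #   # A cron job or worker flushes the queue later:
+    #   call_command('send_queued_mail', processes=1)
+
+    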
@override_settings(EMAIL_BACKEND='post_office.EmailBackend')
+    def test_headers_sent(self):
+        """
+        Test that headers are correctly set on the outgoing emails.
+        """
+        message = EmailMessage('subject', 'body', 'from@example.com',
+                               ['recipient@example.com'],
+                               headers={'Reply-To': 'reply@example.com'})
+        message.send()
+        email = Email.objects.latest('id')
+        self.assertEqual(email.headers, {'Reply-To': 'reply@example.com'})
+
+    @override_settings(EMAIL_BACKEND='post_office.EmailBackend')
+    def test_backend_attachments(self):
+        message = EmailMessage('subject', 'body', 'from@example.com',
+                               ['recipient@example.com'])
+
+        message.attach('attachment.txt', 'attachment content')
+        message.send()
+
+        email = Email.objects.latest('id')
+        self.assertEqual(email.attachments.count(), 1)
+        self.assertEqual(email.attachments.all()[0].name, 'attachment.txt')
+        self.assertEqual(email.attachments.all()[0].file.read(), b'attachment content')
+
+    @override_settings(
+        EMAIL_BACKEND='post_office.EmailBackend',
+        POST_OFFICE={
+            'DEFAULT_PRIORITY': 'now',
+            'BACKENDS': {'default': 'django.core.mail.backends.dummy.EmailBackend'}
+        }
+    )
+    def test_default_priority_now(self):
+        # If DEFAULT_PRIORITY is "now", mails should be sent right away
+        send_mail('Test', 'Message', 'from1@example.com', ['to@example.com'])
+        email = Email.objects.latest('id')
+        self.assertEqual(email.status, STATUS.sent)
diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_cache.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_cache.py
new file mode 100644
index 0000000..bde6dbd
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_cache.py
@@ -0,0 +1,41 @@
+from django.conf import settings
+from django.test import TestCase
+
+from post_office import cache
+from ..settings import get_cache_backend
+
+
+class CacheTest(TestCase):
+
+    def test_get_backend_settings(self):
+        """Test basic get backend function and its settings"""
+        # Sanity check
+        self.assertTrue('post_office' in settings.CACHES)
+        self.assertTrue(get_cache_backend())
+
+        # If no post office key is defined, it should return default
+        del settings.CACHES['post_office']
+        self.assertTrue(get_cache_backend())
+
+        # If no caches key in settings, it should return None
+        delattr(settings, 'CACHES')
+        self.assertEqual(None, get_cache_backend())
+
+    def test_get_cache_key(self):
+        """
+        Test for converting names to cache keys
+        """
+        self.assertEqual('post_office:template:test', cache.get_cache_key('test'))
+        self.assertEqual('post_office:template:test-slugify', cache.get_cache_key('test slugify'))
+
+    def test_basic_cache_operations(self):
+        """
+        Test basic cache operations
+        """
+        # clean test cache
+        cache.cache_backend.clear()
+        self.assertEqual(None, cache.get('test-cache'))
+        cache.set('test-cache', 'awesome content')
+        self.assertEqual('awesome content', cache.get('test-cache'))
+        cache.delete('test-cache')
+        self.assertEqual(None, cache.get('test-cache'))
diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_commands.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_commands.py
new file mode 100644
index 0000000..4948606
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_commands.py
@@ -0,0 +1,150 @@
+import datetime
+import os
+
+from django.core.files.base import ContentFile
+from django.core.management import call_command
+from django.test import TestCase
+from django.test.utils import override_settings
+from django.utils.timezone import now
+
+from ..models import Attachment, Email, STATUS
+
+
+class CommandTest(TestCase):
+
+    def test_cleanup_mail_with_orphaned_attachments(self):
+        self.assertEqual(Email.objects.count(), 0)
+        email = Email.objects.create(to=['to@example.com'],
+                                     from_email='from@example.com',
+                                     subject='Subject')
+
+        email.created = now() - datetime.timedelta(31)
+        email.save()
+
+        attachment = Attachment()
+        attachment.file.save(
+            'test.txt', content=ContentFile('test file content'), save=True
+        )
+        email.attachments.add(attachment)
+        attachment_path = attachment.file.name
+
+        # We now have an orphaned attachment
+        call_command('cleanup_mail', days=30)
+        self.assertEqual(Email.objects.count(), 0)
+        self.assertEqual(Attachment.objects.count(), 1)
+
+        # Actually clean up the orphaned attachments
+        call_command('cleanup_mail', '-da', days=30)
+        self.assertEqual(Email.objects.count(), 0)
+        self.assertEqual(Attachment.objects.count(), 0)
+
+        # Check that the actual file has been deleted as well
+        self.assertFalse(os.path.exists(attachment_path))
+
+        # Check that the email attachment's actual file has been deleted
+        Email.objects.all().delete()
+        email = Email.objects.create(to=['to@example.com'],
+                                     from_email='from@example.com',
+                                     subject='Subject')
+        email.created = now() - datetime.timedelta(31)
+        email.save()
+
+        attachment = Attachment()
+        attachment.file.save(
+            'test.txt', content=ContentFile('test file content'), save=True
+        )
+        email.attachments.add(attachment)
+        attachment_path = attachment.file.name
+
+        # Simulate that the file has been deleted by accident
+        os.remove(attachment_path)
+
+        # No exceptions should break the cleanup
+        call_command('cleanup_mail', '-da', days=30)
+        self.assertEqual(Email.objects.count(), 0)
+        self.assertEqual(Attachment.objects.count(), 0)
+
+    def test_cleanup_mail(self):
+        """
+        The ``cleanup_mail`` command deletes mails older than a specified
+        number of days
+        """
+        self.assertEqual(Email.objects.count(), 0)
+
+        # The command shouldn't delete today's email
+        email = Email.objects.create(from_email='from@example.com',
+                                     to=['to@example.com'])
+        call_command('cleanup_mail', days=30)
+        self.assertEqual(Email.objects.count(), 1)
+
+        # Email older than 30 days should be deleted
+        email.created = now() - datetime.timedelta(31)
+        email.save()
+        call_command('cleanup_mail', days=30)
+        self.assertEqual(Email.objects.count(), 0)
+
+    TEST_SETTINGS = {
+        'BACKENDS': {
+            'default': 'django.core.mail.backends.dummy.EmailBackend',
+        },
+        'BATCH_SIZE': 1
+    }
+
+    @override_settings(POST_OFFICE=TEST_SETTINGS)
+    def test_send_queued_mail(self):
+        """
+        Ensure that ``send_queued_mail`` behaves properly and sends all queued
+        emails in two batches.
+        """
+        # Make sure that send_queued_mail with an empty queue does not raise an error
+        call_command('send_queued_mail', processes=1)
+
+        Email.objects.create(from_email='from@example.com',
+                             to=['to@example.com'], status=STATUS.queued)
+        Email.objects.create(from_email='from@example.com',
+                             to=['to@example.com'], status=STATUS.queued)
+        call_command('send_queued_mail', processes=1)
+        self.assertEqual(Email.objects.filter(status=STATUS.sent).count(), 2)
+        self.assertEqual(Email.objects.filter(status=STATUS.queued).count(), 0)
+
+    def test_successful_deliveries_logging(self):
+        """
+        Successful deliveries are only logged when log_level is 2.
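+        At log levels 0 and 1 no Log record is created for a successful
+        delivery.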
+ """ + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued) + call_command('send_queued_mail', log_level=0) + self.assertEqual(email.logs.count(), 0) + + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued) + call_command('send_queued_mail', log_level=1) + self.assertEqual(email.logs.count(), 0) + + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued) + call_command('send_queued_mail', log_level=2) + self.assertEqual(email.logs.count(), 1) + + def test_failed_deliveries_logging(self): + """ + Failed deliveries are logged when log_level is 1 and 2. + """ + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued, + backend_alias='error') + call_command('send_queued_mail', log_level=0) + self.assertEqual(email.logs.count(), 0) + + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued, + backend_alias='error') + call_command('send_queued_mail', log_level=1) + self.assertEqual(email.logs.count(), 1) + + email = Email.objects.create(from_email='from@example.com', + to=['to@example.com'], status=STATUS.queued, + backend_alias='error') + call_command('send_queued_mail', log_level=2) + self.assertEqual(email.logs.count(), 1) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_connections.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_connections.py new file mode 100644 index 0000000..aecae75 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_connections.py @@ -0,0 +1,13 @@ +from django.core.mail import backends +from django.test import TestCase + +from .test_backends import ErrorRaisingBackend +from ..connections import connections + + +class ConnectionTest(TestCase): + + def test_get_connection(self): + # Ensure ConnectionHandler returns the right connection + self.assertTrue(isinstance(connections['error'], ErrorRaisingBackend)) + self.assertTrue(isinstance(connections['locmem'], backends.locmem.EmailBackend)) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_lockfile.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_lockfile.py new file mode 100644 index 0000000..852c236 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_lockfile.py @@ -0,0 +1,75 @@ +import time +import os + +from django.test import TestCase + +from ..lockfile import FileLock, FileLocked + + +def setup_fake_lock(lock_file_name): + pid = os.getpid() + lockfile = '%s.lock' % pid + try: + os.remove(lock_file_name) + except OSError: + pass + os.symlink(lockfile, lock_file_name) + + +class LockTest(TestCase): + + def test_process_killed_force_unlock(self): + pid = os.getpid() + lockfile = '%s.lock' % pid + setup_fake_lock('test.lock') + + with open(lockfile, 'w+') as f: + f.write('9999999') + assert os.path.exists(lockfile) + with FileLock('test'): + assert True + + def test_force_unlock_in_same_process(self): + pid = os.getpid() + lockfile = '%s.lock' % pid + os.symlink(lockfile, 'test.lock') + + with open(lockfile, 'w+') as f: + f.write(str(os.getpid())) + + with FileLock('test', force=True): + assert True + + def test_exception_after_timeout(self): + pid = os.getpid() + lockfile = '%s.lock' % pid + setup_fake_lock('test.lock') + + with open(lockfile, 'w+') as f: + f.write(str(os.getpid())) + + try: + with FileLock('test', 
timeout=1): + assert False + except FileLocked: + assert True + + def test_force_after_timeout(self): + pid = os.getpid() + lockfile = '%s.lock' % pid + setup_fake_lock('test.lock') + + with open(lockfile, 'w+') as f: + f.write(str(os.getpid())) + + timeout = 1 + start = time.time() + with FileLock('test', timeout=timeout, force=True): + assert True + end = time.time() + assert end - start > timeout + + def test_get_lock_pid(self): + """Ensure get_lock_pid() works properly""" + with FileLock('test', timeout=1, force=True) as lock: + self.assertEqual(lock.get_lock_pid(), int(os.getpid())) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_mail.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_mail.py new file mode 100644 index 0000000..df5b66d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_mail.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from datetime import date, datetime + +from django.core import mail +from django.core.files.base import ContentFile +from django.conf import settings + +from django.test import TestCase +from django.test.utils import override_settings + +from ..settings import get_batch_size, get_log_level, get_threads_per_process +from ..models import Email, EmailTemplate, Attachment, PRIORITY, STATUS +from ..mail import (create, get_queued, + send, send_many, send_queued, _send_bulk) + + +connection_counter = 0 + + +class ConnectionTestingBackend(mail.backends.base.BaseEmailBackend): + ''' + An EmailBackend that increments a global counter when connection is opened + ''' + + def open(self): + global connection_counter + connection_counter += 1 + + def send_messages(self, email_messages): + pass + + +class MailTest(TestCase): + + @override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend') + def test_send_queued_mail(self): + """ + Check that only queued messages are sent. + """ + kwargs = { + 'to': ['to@example.com'], + 'from_email': 'bob@example.com', + 'subject': 'Test', + 'message': 'Message', + } + failed_mail = Email.objects.create(status=STATUS.failed, **kwargs) + none_mail = Email.objects.create(status=None, **kwargs) + + # This should be the only email that gets sent + queued_mail = Email.objects.create(status=STATUS.queued, **kwargs) + send_queued() + self.assertNotEqual(Email.objects.get(id=failed_mail.id).status, STATUS.sent) + self.assertNotEqual(Email.objects.get(id=none_mail.id).status, STATUS.sent) + self.assertEqual(Email.objects.get(id=queued_mail.id).status, STATUS.sent) + + @override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend') + def test_send_queued_mail_multi_processes(self): + """ + Check that send_queued works well with multiple processes + """ + kwargs = { + 'to': ['to@example.com'], + 'from_email': 'bob@example.com', + 'subject': 'Test', + 'message': 'Message', + 'status': STATUS.queued + } + + # All three emails should be sent + self.assertEqual(Email.objects.filter(status=STATUS.sent).count(), 0) + for i in range(3): + Email.objects.create(**kwargs) + total_sent, total_failed = send_queued(processes=2) + self.assertEqual(total_sent, 3) + + def test_send_bulk(self): + """ + Ensure _send_bulk() properly sends out emails. 
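+        Each queued email is delivered once and its status set to sent.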
+ """ + email = Email.objects.create( + to=['to@example.com'], from_email='bob@example.com', + subject='send bulk', message='Message', status=STATUS.queued, + backend_alias='locmem') + _send_bulk([email], uses_multiprocessing=False) + self.assertEqual(Email.objects.get(id=email.id).status, STATUS.sent) + self.assertEqual(len(mail.outbox), 1) + self.assertEqual(mail.outbox[0].subject, 'send bulk') + + @override_settings(EMAIL_BACKEND='post_office.tests.test_mail.ConnectionTestingBackend') + def test_send_bulk_reuses_open_connection(self): + """ + Ensure _send_bulk() only opens connection once to send multiple emails. + """ + global connection_counter + self.assertEqual(connection_counter, 0) + email = Email.objects.create(to=['to@example.com'], + from_email='bob@example.com', subject='', + message='', status=STATUS.queued, backend_alias='connection_tester') + email_2 = Email.objects.create(to=['to@example.com'], + from_email='bob@example.com', subject='', + message='', status=STATUS.queued, + backend_alias='connection_tester') + _send_bulk([email, email_2]) + self.assertEqual(connection_counter, 1) + + def test_get_queued(self): + """ + Ensure get_queued returns only emails that should be sent + """ + kwargs = { + 'to': 'to@example.com', + 'from_email': 'bob@example.com', + 'subject': 'Test', + 'message': 'Message', + } + self.assertEqual(list(get_queued()), []) + + # Emails with statuses failed, sent or None shouldn't be returned + Email.objects.create(status=STATUS.failed, **kwargs) + Email.objects.create(status=None, **kwargs) + Email.objects.create(status=STATUS.sent, **kwargs) + self.assertEqual(list(get_queued()), []) + + # Email with queued status and None as scheduled_time should be included + queued_email = Email.objects.create(status=STATUS.queued, + scheduled_time=None, **kwargs) + self.assertEqual(list(get_queued()), [queued_email]) + + # Email scheduled for the future should not be included + Email.objects.create(status=STATUS.queued, + scheduled_time=date(2020, 12, 13), **kwargs) + self.assertEqual(list(get_queued()), [queued_email]) + + # Email scheduled in the past should be included + past_email = Email.objects.create(status=STATUS.queued, + scheduled_time=date(2010, 12, 13), **kwargs) + self.assertEqual(list(get_queued()), [queued_email, past_email]) + + def test_get_batch_size(self): + """ + Ensure BATCH_SIZE setting is read correctly. + """ + previous_settings = settings.POST_OFFICE + self.assertEqual(get_batch_size(), 100) + setattr(settings, 'POST_OFFICE', {'BATCH_SIZE': 10}) + self.assertEqual(get_batch_size(), 10) + settings.POST_OFFICE = previous_settings + + def test_get_threads_per_process(self): + """ + Ensure THREADS_PER_PROCESS setting is read correctly. + """ + previous_settings = settings.POST_OFFICE + self.assertEqual(get_threads_per_process(), 5) + setattr(settings, 'POST_OFFICE', {'THREADS_PER_PROCESS': 10}) + self.assertEqual(get_threads_per_process(), 10) + settings.POST_OFFICE = previous_settings + + def test_get_log_level(self): + """ + Ensure LOG_LEVEL setting is read correctly. 
+ """ + previous_settings = settings.POST_OFFICE + self.assertEqual(get_log_level(), 2) + setattr(settings, 'POST_OFFICE', {'LOG_LEVEL': 1}) + self.assertEqual(get_log_level(), 1) + # Restore ``LOG_LEVEL`` + setattr(settings, 'POST_OFFICE', {'LOG_LEVEL': 2}) + settings.POST_OFFICE = previous_settings + + def test_create(self): + """ + Test basic email creation + """ + + # Test that email is persisted only when commit=True + email = create( + sender='from@example.com', recipients=['to@example.com'], + commit=False + ) + self.assertEqual(email.id, None) + email = create( + sender='from@example.com', recipients=['to@example.com'], + commit=True + ) + self.assertNotEqual(email.id, None) + + # Test that email is created with the right status + email = create( + sender='from@example.com', recipients=['to@example.com'], + priority=PRIORITY.now + ) + self.assertEqual(email.status, None) + email = create( + sender='from@example.com', recipients=['to@example.com'], + priority=PRIORITY.high + ) + self.assertEqual(email.status, STATUS.queued) + + # Test that email is created with the right content + context = { + 'subject': 'My subject', + 'message': 'My message', + 'html': 'My html', + } + now = datetime.now() + email = create( + sender='from@example.com', recipients=['to@example.com'], + subject='Test {{ subject }}', message='Test {{ message }}', + html_message='Test {{ html }}', context=context, + scheduled_time=now, headers={'header': 'Test header'}, + ) + self.assertEqual(email.from_email, 'from@example.com') + self.assertEqual(email.to, ['to@example.com']) + self.assertEqual(email.subject, 'Test My subject') + self.assertEqual(email.message, 'Test My message') + self.assertEqual(email.html_message, 'Test My html') + self.assertEqual(email.scheduled_time, now) + self.assertEqual(email.headers, {'header': 'Test header'}) + + def test_send_many(self): + """Test send_many creates the right emails """ + kwargs_list = [ + {'sender': 'from@example.com', 'recipients': ['a@example.com']}, + {'sender': 'from@example.com', 'recipients': ['b@example.com']}, + ] + send_many(kwargs_list) + self.assertEqual(Email.objects.filter(to=['a@example.com']).count(), 1) + + def test_send_with_attachments(self): + attachments = { + 'attachment_file1.txt': ContentFile('content'), + 'attachment_file2.txt': ContentFile('content'), + } + email = send(recipients=['a@example.com', 'b@example.com'], + sender='from@example.com', message='message', + subject='subject', attachments=attachments) + + self.assertTrue(email.pk) + self.assertEqual(email.attachments.count(), 2) + + def test_send_with_render_on_delivery(self): + """ + Ensure that mail.send() create email instances with appropriate + fields being saved + """ + template = EmailTemplate.objects.create( + subject='Subject {{ name }}', + content='Content {{ name }}', + html_content='HTML {{ name }}' + ) + context = {'name': 'test'} + email = send(recipients=['a@example.com', 'b@example.com'], + template=template, context=context, + render_on_delivery=True) + self.assertEqual(email.subject, '') + self.assertEqual(email.message, '') + self.assertEqual(email.html_message, '') + self.assertEqual(email.template, template) + + # context shouldn't be persisted when render_on_delivery = False + email = send(recipients=['a@example.com'], + template=template, context=context, + render_on_delivery=False) + self.assertEqual(email.context, None) + + def test_send_with_attachments_multiple_recipients(self): + """Test reusing the same attachment objects for several email objects""" + 
attachments = { + 'attachment_file1.txt': ContentFile('content'), + 'attachment_file2.txt': ContentFile('content'), + } + email = send(recipients=['a@example.com', 'b@example.com'], + sender='from@example.com', message='message', + subject='subject', attachments=attachments) + + self.assertEqual(email.attachments.count(), 2) + self.assertEqual(Attachment.objects.count(), 2) + + def test_create_with_template(self): + """If render_on_delivery is True, subject and content won't be + rendered; the context is saved so rendering can happen at delivery time.""" + + template = EmailTemplate.objects.create( + subject='Subject {{ name }}', + content='Content {{ name }}', + html_content='HTML {{ name }}' + ) + context = {'name': 'test'} + email = create( + sender='from@example.com', recipients=['to@example.com'], + template=template, context=context, render_on_delivery=True + ) + self.assertEqual(email.subject, '') + self.assertEqual(email.message, '') + self.assertEqual(email.html_message, '') + self.assertEqual(email.context, context) + self.assertEqual(email.template, template) + + def test_create_with_template_and_empty_context(self): + """If render_on_delivery is False, subject and content + will be rendered, context won't be saved.""" + + template = EmailTemplate.objects.create( + subject='Subject {% now "Y" %}', + content='Content {% now "Y" %}', + html_content='HTML {% now "Y" %}' + ) + context = None + email = create( + sender='from@example.com', recipients=['to@example.com'], + template=template, context=context + ) + today = date.today() + current_year = today.year + self.assertEqual(email.subject, 'Subject %d' % current_year) + self.assertEqual(email.message, 'Content %d' % current_year) + self.assertEqual(email.html_message, 'HTML %d' % current_year) + self.assertEqual(email.context, None) + self.assertEqual(email.template, None) + + def test_backend_alias(self): + """Test backend_alias field is properly set.""" + + email = send(recipients=['a@example.com'], + sender='from@example.com', message='message', + subject='subject') + self.assertEqual(email.backend_alias, '') + + email = send(recipients=['a@example.com'], + sender='from@example.com', message='message', + subject='subject', backend='locmem') + self.assertEqual(email.backend_alias, 'locmem') + + with self.assertRaises(ValueError): + send(recipients=['a@example.com'], sender='from@example.com', + message='message', subject='subject', backend='foo') + + @override_settings(LANGUAGES=(('en', 'English'), ('ru', 'Russian'))) + def test_send_with_template(self): + """If render_on_delivery is False, subject and content + will be rendered, context won't be saved.""" + + template = EmailTemplate.objects.create( + subject='Subject {{ name }}', + content='Content {{ name }}', + html_content='HTML {{ name }}' + ) + russian_template = EmailTemplate( + default_template=template, + language='ru', + subject='предмет {{ name }}', + content='содержание {{ name }}', + html_content='HTML {{ name }}' + ) + russian_template.save() + + context = {'name': 'test'} + email = send(recipients=['to@example.com'], sender='from@example.com', + template=template, context=context) + email = Email.objects.get(id=email.id) + self.assertEqual(email.subject, 'Subject test') + self.assertEqual(email.message, 'Content test') + self.assertEqual(email.html_message, 'HTML test') + self.assertEqual(email.context, None) + self.assertEqual(email.template, None) + + # Check that the Russian template version is used + email = send(recipients=['to@example.com'], sender='from@example.com', + template=russian_template,
context=context) + email = Email.objects.get(id=email.id) + self.assertEqual(email.subject, 'предмет test') + self.assertEqual(email.message, 'содержание test') + self.assertEqual(email.html_message, 'HTML test') + self.assertEqual(email.context, None) + self.assertEqual(email.template, None) + + # Check that send picks template with the right language + email = send(recipients=['to@example.com'], sender='from@example.com', + template=template, context=context, language='ru') + email = Email.objects.get(id=email.id) + self.assertEqual(email.subject, 'предмет test') + + email = send(recipients=['to@example.com'], sender='from@example.com', + template=template, context=context, language='ru', + render_on_delivery=True) + self.assertEqual(email.template.language, 'ru') + + def test_send_bulk_with_faulty_template(self): + template = EmailTemplate.objects.create( + subject='{% if foo %}Subject {{ name }}', + content='Content {{ name }}', + html_content='HTML {{ name }}' + ) + email = Email.objects.create(to='to@example.com', from_email='from@example.com', + template=template, status=STATUS.queued) + _send_bulk([email], uses_multiprocessing=False) + email = Email.objects.get(id=email.id) + self.assertEqual(email.status, STATUS.failed) \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_models.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_models.py new file mode 100644 index 0000000..1d2245d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_models.py @@ -0,0 +1,332 @@ +import django +import json +import os + +from datetime import datetime, timedelta + +from django.conf import settings as django_settings +from django.core import mail +from django.core import serializers +from django.core.files.base import ContentFile +from django.core.mail import EmailMessage, EmailMultiAlternatives +from django.forms.models import modelform_factory +from django.test import TestCase +from django.utils import timezone + +from ..models import Email, Log, PRIORITY, STATUS, EmailTemplate, Attachment +from ..mail import send + + +class ModelTest(TestCase): + + def test_email_message(self): + """ + Test to make sure that model's "email_message" method + returns proper email classes. + """ + + # If ``html_message`` is set, ``EmailMultiAlternatives`` is expected + email = Email.objects.create(to=['to@example.com'], + from_email='from@example.com', subject='Subject', + message='Message', html_message='
<p>HTML</p>') + message = email.email_message() + self.assertEqual(type(message), EmailMultiAlternatives) + self.assertEqual(message.from_email, 'from@example.com') + self.assertEqual(message.to, ['to@example.com']) + self.assertEqual(message.subject, 'Subject') + self.assertEqual(message.body, 'Message') + self.assertEqual(message.alternatives, [('<p>HTML</p>
', 'text/html')]) + + # Without ``html_message``, ``EmailMessage`` class is expected + email = Email.objects.create(to=['to@example.com'], + from_email='from@example.com', subject='Subject', + message='Message') + message = email.email_message() + self.assertEqual(type(message), EmailMessage) + self.assertEqual(message.from_email, 'from@example.com') + self.assertEqual(message.to, ['to@example.com']) + self.assertEqual(message.subject, 'Subject') + self.assertEqual(message.body, 'Message') + + def test_email_message_render(self): + """ + Ensure Email instance with template is properly rendered. + """ + template = EmailTemplate.objects.create( + subject='Subject {{ name }}', + content='Content {{ name }}', + html_content='HTML {{ name }}' + ) + context = {'name': 'test'} + email = Email.objects.create(to=['to@example.com'], template=template, + from_email='from@e.com', context=context) + message = email.email_message() + self.assertEqual(message.subject, 'Subject test') + self.assertEqual(message.body, 'Content test') + self.assertEqual(message.alternatives[0][0], 'HTML test') + + def test_dispatch(self): + """ + Ensure that email.dispatch() actually sends out the email + """ + email = Email.objects.create(to=['to@example.com'], from_email='from@example.com', + subject='Test dispatch', message='Message', backend_alias='locmem') + email.dispatch() + self.assertEqual(mail.outbox[0].subject, 'Test dispatch') + + def test_status_and_log(self): + """ + Ensure that status and log are set properly on successful sending + """ + email = Email.objects.create(to=['to@example.com'], from_email='from@example.com', + subject='Test', message='Message', backend_alias='locmem', id=333) + # Ensure that after dispatch status and logs are correctly set + email.dispatch() + log = Log.objects.latest('id') + self.assertEqual(email.status, STATUS.sent) + self.assertEqual(log.email, email) + + def test_status_and_log_on_error(self): + """ + Ensure that status and log are set properly on sending failure + """ + email = Email.objects.create(to=['to@example.com'], from_email='from@example.com', + subject='Test', message='Message', + backend_alias='error') + # Ensure that after dispatch status and logs are correctly set + email.dispatch() + log = Log.objects.latest('id') + self.assertEqual(email.status, STATUS.failed) + self.assertEqual(log.email, email) + self.assertEqual(log.status, STATUS.failed) + self.assertEqual(log.message, 'Fake Error') + self.assertEqual(log.exception_type, 'Exception') + + def test_errors_while_getting_connection_are_logged(self): + """ + Ensure that status and log are set properly on sending failure + """ + email = Email.objects.create(to=['to@example.com'], subject='Test', + from_email='from@example.com', + message='Message', backend_alias='random') + # Ensure that after dispatch status and logs are correctly set + email.dispatch() + log = Log.objects.latest('id') + self.assertEqual(email.status, STATUS.failed) + self.assertEqual(log.email, email) + self.assertEqual(log.status, STATUS.failed) + self.assertIn('is not a valid', log.message) + + def test_default_sender(self): + email = send(['to@example.com'], subject='foo') + self.assertEqual(email.from_email, + django_settings.DEFAULT_FROM_EMAIL) + + def test_send_argument_checking(self): + """ + mail.send() should raise an Exception if: + - "template" is used with "subject", "message" or "html_message" + - recipients is not in tuple or list format + """ + self.assertRaises(ValueError, send, ['to@example.com'], 'from@a.com', + 
template='foo', subject='bar') + self.assertRaises(ValueError, send, ['to@example.com'], 'from@a.com', + template='foo', message='bar') + self.assertRaises(ValueError, send, ['to@example.com'], 'from@a.com', + template='foo', html_message='bar') + self.assertRaises(ValueError, send, 'to@example.com', 'from@a.com', + template='foo', html_message='bar') + self.assertRaises(ValueError, send, cc='cc@example.com', sender='from@a.com', + template='foo', html_message='bar') + self.assertRaises(ValueError, send, bcc='bcc@example.com', sender='from@a.com', + template='foo', html_message='bar') + + def test_send_with_template(self): + """ + Ensure mail.send correctly creates templated emails to recipients + """ + Email.objects.all().delete() + headers = {'Reply-to': 'reply@email.com'} + email_template = EmailTemplate.objects.create(name='foo', subject='bar', + content='baz') + scheduled_time = datetime.now() + timedelta(days=1) + email = send(recipients=['to1@example.com', 'to2@example.com'], sender='from@a.com', + headers=headers, template=email_template, + scheduled_time=scheduled_time) + self.assertEqual(email.to, ['to1@example.com', 'to2@example.com']) + self.assertEqual(email.headers, headers) + self.assertEqual(email.scheduled_time, scheduled_time) + + # Test without header + Email.objects.all().delete() + email = send(recipients=['to1@example.com', 'to2@example.com'], sender='from@a.com', + template=email_template) + self.assertEqual(email.to, ['to1@example.com', 'to2@example.com']) + self.assertEqual(email.headers, None) + + def test_send_without_template(self): + headers = {'Reply-to': 'reply@email.com'} + scheduled_time = datetime.now() + timedelta(days=1) + email = send(sender='from@a.com', + recipients=['to1@example.com', 'to2@example.com'], + cc=['cc1@example.com', 'cc2@example.com'], + bcc=['bcc1@example.com', 'bcc2@example.com'], + subject='foo', message='bar', html_message='baz', + context={'name': 'Alice'}, headers=headers, + scheduled_time=scheduled_time, priority=PRIORITY.low) + + self.assertEqual(email.to, ['to1@example.com', 'to2@example.com']) + self.assertEqual(email.cc, ['cc1@example.com', 'cc2@example.com']) + self.assertEqual(email.bcc, ['bcc1@example.com', 'bcc2@example.com']) + self.assertEqual(email.subject, 'foo') + self.assertEqual(email.message, 'bar') + self.assertEqual(email.html_message, 'baz') + self.assertEqual(email.headers, headers) + self.assertEqual(email.priority, PRIORITY.low) + self.assertEqual(email.scheduled_time, scheduled_time) + + # Same thing, but now with context + email = send(['to1@example.com'], 'from@a.com', + subject='Hi {{ name }}', message='Message {{ name }}', + html_message='<b>{{ name }}</b>', + context={'name': 'Bob'}, headers=headers) + self.assertEqual(email.to, ['to1@example.com']) + self.assertEqual(email.subject, 'Hi Bob') + self.assertEqual(email.message, 'Message Bob') + self.assertEqual(email.html_message, '<b>Bob</b>') + self.assertEqual(email.headers, headers) + + def test_invalid_syntax(self): + """ + Ensures that invalid template syntax will result in validation errors + when saving a ModelForm of an EmailTemplate. + """ + data = dict( + name='cost', + subject='Hi there!{{ }}', + content='Welcome {{ name|titl }} to the site.', + html_content='{% block content %}
<h1>Welcome to the site</h1>
' + ) + + EmailTemplateForm = modelform_factory(EmailTemplate, + exclude=['template']) + form = EmailTemplateForm(data) + + self.assertFalse(form.is_valid()) + + self.assertEqual(form.errors['default_template'], [u'This field is required.']) + self.assertEqual(form.errors['content'], [u"Invalid filter: 'titl'"]) + self.assertIn(form.errors['html_content'], + [[u'Unclosed tags: endblock '], + [u"Unclosed tag on line 1: 'block'. Looking for one of: endblock."]]) + self.assertIn(form.errors['subject'], + [[u'Empty variable tag'], [u'Empty variable tag on line 1']]) + + def test_string_priority(self): + """ + Regression test for: + https://github.com/ui/django-post_office/issues/23 + """ + email = send(['to1@example.com'], 'from@a.com', priority='low') + self.assertEqual(email.priority, PRIORITY.low) + + def test_default_priority(self): + email = send(recipients=['to1@example.com'], sender='from@a.com') + self.assertEqual(email.priority, PRIORITY.medium) + + def test_string_priority_exception(self): + invalid_priority_send = lambda: send(['to1@example.com'], 'from@a.com', priority='hgh') + + with self.assertRaises(ValueError) as context: + invalid_priority_send() + + self.assertEqual( + str(context.exception), + 'Invalid priority, must be one of: low, medium, high, now' + ) + + def test_send_recipient_display_name(self): + """ + Regression test for: + https://github.com/ui/django-post_office/issues/73 + """ + email = send(recipients=['Alice Bob <email@example.com>'], sender='from@a.com') + self.assertTrue(email.to) + + def test_attachment_filename(self): + attachment = Attachment() + + attachment.file.save( + 'test.txt', + content=ContentFile('test file content'), + save=True + ) + self.assertEqual(attachment.name, 'test.txt') + + # Test that it is saved to the correct subdirectory + date = timezone.now().date() + expected_path = os.path.join('post_office_attachments', str(date.year), + str(date.month), str(date.day)) + self.assertTrue(expected_path in attachment.file.name) + + def test_attachments_email_message(self): + email = Email.objects.create(to=['to@example.com'], + from_email='from@example.com', + subject='Subject') + + attachment = Attachment() + attachment.file.save( + 'test.txt', content=ContentFile('test file content'), save=True + ) + email.attachments.add(attachment) + message = email.email_message() + + # https://docs.djangoproject.com/en/1.11/releases/1.11/#email + if django.VERSION >= (1, 11,): + self.assertEqual(message.attachments, + [('test.txt', 'test file content', 'text/plain')]) + else: + self.assertEqual(message.attachments, + [('test.txt', b'test file content', None)]) + + def test_attachments_email_message_with_mimetype(self): + email = Email.objects.create(to=['to@example.com'], + from_email='from@example.com', + subject='Subject') + + attachment = Attachment() + attachment.file.save( + 'test.txt', content=ContentFile('test file content'), save=True + ) + attachment.mimetype = 'text/plain' + attachment.save() + email.attachments.add(attachment) + message = email.email_message() + + if django.VERSION >= (1, 11,): + self.assertEqual(message.attachments, + [('test.txt', 'test file content', 'text/plain')]) + else: + self.assertEqual(message.attachments, + [('test.txt', b'test file content', 'text/plain')]) + + def test_translated_template_uses_default_templates_name(self): + template = EmailTemplate.objects.create(name='name') + id_template = template.translated_templates.create(language='id') + self.assertEqual(id_template.name, template.name) + + def test_models_repr(self): +
self.assertEqual(repr(EmailTemplate(name='test', language='en')), + '<EmailTemplate: test en>') + self.assertEqual(repr(Email(to=['test@example.com'])), + "<Email: ['test@example.com']>") + + def test_natural_key(self): + template = EmailTemplate.objects.create(name='name') + self.assertEqual(template, EmailTemplate.objects.get_by_natural_key(*template.natural_key())) + + data = serializers.serialize('json', [template], use_natural_primary_keys=True) + self.assertNotIn('pk', json.loads(data)[0]) + deserialized_objects = serializers.deserialize('json', data, use_natural_primary_keys=True) + list(deserialized_objects)[0].save() + self.assertEqual(EmailTemplate.objects.count(), 1) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_utils.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_utils.py new file mode 100644 index 0000000..dbdf5bd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_utils.py @@ -0,0 +1,203 @@ +from django.core.files.base import ContentFile +from django.core.exceptions import ValidationError + +from django.test import TestCase +from django.test.utils import override_settings + +from ..models import Email, STATUS, PRIORITY, EmailTemplate, Attachment +from ..utils import (create_attachments, get_email_template, parse_emails, + parse_priority, send_mail, split_emails) +from ..validators import validate_email_with_name, validate_comma_separated_emails + + +@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend') +class UtilsTest(TestCase): + + def test_mail_status(self): + """ + Check that send_mail assigns the right status field to Email instances + """ + send_mail('subject', 'message', 'from@example.com', ['to@example.com'], + priority=PRIORITY.medium) + email = Email.objects.latest('id') + self.assertEqual(email.status, STATUS.queued) + + # Emails sent with "now" priority are sent right away + send_mail('subject', 'message', 'from@example.com', ['to@example.com'], + priority=PRIORITY.now) + email = Email.objects.latest('id') + self.assertEqual(email.status, STATUS.sent) + + def test_email_validator(self): + # These should validate + validate_email_with_name('email@example.com') + validate_email_with_name('Alice Bob <email@example.com>') + Email.objects.create(to=['to@example.com'], from_email='Alice <from@example.com>', + subject='Test', message='Message', status=STATUS.sent) + + # Should also support international domains + validate_email_with_name('Alice Bob <email@example.co.id>') + + # These should raise ValidationError + self.assertRaises(ValidationError, validate_email_with_name, 'invalid') + self.assertRaises(ValidationError, validate_email_with_name, 'Al <ab>') + + def test_comma_separated_email_list_validator(self): + # These should validate + validate_comma_separated_emails(['email@example.com']) + validate_comma_separated_emails( + ['email@example.com', 'email2@example.com', 'email3@example.com'] + ) + validate_comma_separated_emails(['Alice Bob <email@example.com>']) + + # Should also support international domains + validate_comma_separated_emails(['email@example.co.id']) + + # These should raise ValidationError + self.assertRaises(ValidationError, validate_comma_separated_emails, + ['email@example.com', 'invalid_mail', 'email@example.com']) + + def test_get_template_email(self): + # Sanity Check + name = 'customer/happy-holidays' + self.assertRaises(EmailTemplate.DoesNotExist, get_email_template, name) + template = EmailTemplate.objects.create(name=name, content='test') + + # First query should hit database + self.assertNumQueries(1, lambda: get_email_template(name)) + # Second query should hit cache
instead + self.assertNumQueries(0, lambda: get_email_template(name)) + + # It should return the correct template + self.assertEqual(template, get_email_template(name)) + + # Repeat with language support + template = EmailTemplate.objects.create(name=name, content='test', + language='en') + # First query should hit database + self.assertNumQueries(1, lambda: get_email_template(name, 'en')) + # Second query should hit cache instead + self.assertNumQueries(0, lambda: get_email_template(name, 'en')) + + # It should return the correct template + self.assertEqual(template, get_email_template(name, 'en')) + + def test_template_caching_settings(self): + """Check that POST_OFFICE_CACHE and POST_OFFICE_TEMPLATE_CACHE are + understood correctly + """ + def is_cache_used(suffix='', desired_cache=False): + """Raise an exception if the real cache usage does not match the + desired_cache value + """ + # To avoid cache invalidation, just create a new template + name = 'can_i/support_cache_settings%s' % suffix + self.assertRaises( + EmailTemplate.DoesNotExist, get_email_template, name + ) + EmailTemplate.objects.create(name=name, content='test') + + # First query should hit database anyway + self.assertNumQueries(1, lambda: get_email_template(name)) + # Second query should hit cache instead only if we want it + self.assertNumQueries( + 0 if desired_cache else 1, + lambda: get_email_template(name) + ) + return + + # Default: the cache is used + is_cache_used(suffix='with_default_cache', desired_cache=True) + + # Disable cache + with self.settings(POST_OFFICE_CACHE=False): + is_cache_used(suffix='cache_disabled_global', desired_cache=False) + with self.settings(POST_OFFICE_TEMPLATE_CACHE=False): + is_cache_used( + suffix='cache_disabled_for_templates', desired_cache=False + ) + with self.settings(POST_OFFICE_CACHE=True, POST_OFFICE_TEMPLATE_CACHE=False): + is_cache_used( + suffix='cache_disabled_for_templates_but_enabled_global', + desired_cache=False + ) + return + + def test_split_emails(self): + """ + Check that split_emails() correctly divides email lists for multiprocessing + """ + for i in range(225): + Email.objects.create(from_email='from@example.com', to=['to@example.com']) + expected_size = [57, 56, 56, 56] + email_list = split_emails(Email.objects.all(), 4) + self.assertEqual(expected_size, [len(emails) for emails in email_list]) + + def test_create_attachments(self): + attachments = create_attachments({ + 'attachment_file1.txt': ContentFile('content'), + 'attachment_file2.txt': ContentFile('content'), + }) + + self.assertEqual(len(attachments), 2) + self.assertIsInstance(attachments[0], Attachment) + self.assertTrue(attachments[0].pk) + self.assertEqual(attachments[0].file.read(), b'content') + self.assertTrue(attachments[0].name.startswith('attachment_file')) + self.assertEquals(attachments[0].mimetype, u'') + + def test_create_attachments_with_mimetype(self): + attachments = create_attachments({ + 'attachment_file1.txt': { + 'file': ContentFile('content'), + 'mimetype': 'text/plain' + }, + 'attachment_file2.jpg': { + 'file': ContentFile('content'), + 'mimetype': 'text/plain' + } + }) + + self.assertEqual(len(attachments), 2) + self.assertIsInstance(attachments[0], Attachment) + self.assertTrue(attachments[0].pk) + self.assertEquals(attachments[0].file.read(), b'content') + self.assertTrue(attachments[0].name.startswith('attachment_file')) + self.assertEquals(attachments[0].mimetype, 'text/plain') + + def test_create_attachments_open_file(self): + attachments = create_attachments({ + 'attachment_file.py': __file__, + }) + +
self.assertEqual(len(attachments), 1) + self.assertIsInstance(attachments[0], Attachment) + self.assertTrue(attachments[0].pk) + self.assertTrue(attachments[0].file.read()) + self.assertEquals(attachments[0].name, 'attachment_file.py') + self.assertEquals(attachments[0].mimetype, u'') + + def test_parse_priority(self): + self.assertEqual(parse_priority('now'), PRIORITY.now) + self.assertEqual(parse_priority('high'), PRIORITY.high) + self.assertEqual(parse_priority('medium'), PRIORITY.medium) + self.assertEqual(parse_priority('low'), PRIORITY.low) + + def test_parse_emails(self): + # Converts a single email to list of email + self.assertEqual( + parse_emails('test@example.com'), + ['test@example.com'] + ) + + # None is converted into an empty list + self.assertEqual(parse_emails(None), []) + + # Raises ValidationError if email is invalid + self.assertRaises( + ValidationError, + parse_emails, 'invalid_email' + ) + self.assertRaises( + ValidationError, + parse_emails, ['invalid_email', 'test@example.com'] + ) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/tests/test_views.py b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_views.py new file mode 100644 index 0000000..8283666 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/tests/test_views.py @@ -0,0 +1,35 @@ +from django.contrib.auth.models import User +from django.test.client import Client +from django.test import TestCase + +try: + from django.urls import reverse +except ImportError: + from django.core.urlresolvers import reverse + +from post_office import mail +from post_office.models import Email + + +admin_username = 'real_test_admin' +admin_email = 'read@admin.com' +admin_pass = 'admin_pass' + + +class AdminViewTest(TestCase): + def setUp(self): + user = User.objects.create_superuser(admin_username, admin_email, admin_pass) + self.client = Client() + self.client.login(username=user.username, password=admin_pass) + + # Small test to make sure the admin interface is loaded + def test_admin_interface(self): + response = self.client.get(reverse('admin:index')) + self.assertEqual(response.status_code, 200) + + def test_admin_change_page(self): + """Ensure that changing an email object in admin works.""" + mail.send(recipients=['test@example.com'], headers={'foo': 'bar'}) + email = Email.objects.latest('id') + response = self.client.get(reverse('admin:post_office_email_change', args=[email.id])) + self.assertEqual(response.status_code, 200) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/utils.py b/thesisenv/lib/python3.6/site-packages/post_office/utils.py new file mode 100644 index 0000000..11c4b39 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/utils.py @@ -0,0 +1,138 @@ +from django.conf import settings +from django.core.exceptions import ValidationError +from django.core.files import File +from django.utils.encoding import force_text + +from post_office import cache +from .compat import string_types +from .models import Email, PRIORITY, STATUS, EmailTemplate, Attachment +from .settings import get_default_priority +from .validators import validate_email_with_name + + +def send_mail(subject, message, from_email, recipient_list, html_message='', + scheduled_time=None, headers=None, priority=PRIORITY.medium): + """ + Add a new message to the mail queue. This is a replacement for Django's + ``send_mail`` core email method. 
+ """ + + subject = force_text(subject) + status = None if priority == PRIORITY.now else STATUS.queued + emails = [] + for address in recipient_list: + emails.append( + Email.objects.create( + from_email=from_email, to=address, subject=subject, + message=message, html_message=html_message, status=status, + headers=headers, priority=priority, scheduled_time=scheduled_time + ) + ) + if priority == PRIORITY.now: + for email in emails: + email.dispatch() + return emails + + +def get_email_template(name, language=''): + """ + Function that returns an email template instance, from cache or DB. + """ + use_cache = getattr(settings, 'POST_OFFICE_CACHE', True) + if use_cache: + use_cache = getattr(settings, 'POST_OFFICE_TEMPLATE_CACHE', True) + if not use_cache: + return EmailTemplate.objects.get(name=name, language=language) + else: + composite_name = '%s:%s' % (name, language) + email_template = cache.get(composite_name) + if email_template is not None: + return email_template + else: + email_template = EmailTemplate.objects.get(name=name, + language=language) + cache.set(composite_name, email_template) + return email_template + + +def split_emails(emails, split_count=1): + # Group emails into X sublists + # taken from http://www.garyrobinson.net/2008/04/splitting-a-pyt.html + # Strange bug, only return 100 email if we do not evaluate the list + if list(emails): + return [emails[i::split_count] for i in range(split_count)] + + +def create_attachments(attachment_files): + """ + Create Attachment instances from files + + attachment_files is a dict of: + * Key - the filename to be used for the attachment. + * Value - file-like object, or a filename to open OR a dict of {'file': file-like-object, 'mimetype': string} + + Returns a list of Attachment objects + """ + attachments = [] + for filename, filedata in attachment_files.items(): + + if isinstance(filedata, dict): + content = filedata.get('file', None) + mimetype = filedata.get('mimetype', None) + else: + content = filedata + mimetype = None + + opened_file = None + + if isinstance(content, string_types): + # `content` is a filename - try to open the file + opened_file = open(content, 'rb') + content = File(opened_file) + + attachment = Attachment() + if mimetype: + attachment.mimetype = mimetype + attachment.file.save(filename, content=content, save=True) + + attachments.append(attachment) + + if opened_file is not None: + opened_file.close() + + return attachments + + +def parse_priority(priority): + if priority is None: + priority = get_default_priority() + # If priority is given as a string, returns the enum representation + if isinstance(priority, string_types): + priority = getattr(PRIORITY, priority, None) + + if priority is None: + raise ValueError('Invalid priority, must be one of: %s' % + ', '.join(PRIORITY._fields)) + return priority + + +def parse_emails(emails): + """ + A function that returns a list of valid email addresses. + This function will also convert a single email address into + a list of email addresses. + None value is also converted into an empty list. 
+ """ + + if isinstance(emails, string_types): + emails = [emails] + elif emails is None: + emails = [] + + for email in emails: + try: + validate_email_with_name(email) + except ValidationError: + raise ValidationError('%s is not a valid email address' % email) + + return emails diff --git a/thesisenv/lib/python3.6/site-packages/post_office/validators.py b/thesisenv/lib/python3.6/site-packages/post_office/validators.py new file mode 100644 index 0000000..a1be485 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/validators.py @@ -0,0 +1,50 @@ +from django.core.exceptions import ValidationError +from django.core.validators import validate_email +from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist +from django.utils.encoding import force_text + +from .compat import text_type + + +def validate_email_with_name(value): + """ + Validate email address. + + Both "Recipient Name " and "email@example.com" are valid. + """ + value = force_text(value) + + if '<' and '>' in value: + start = value.find('<') + 1 + end = value.find('>') + if start < end: + recipient = value[start:end] + else: + recipient = value + + validate_email(recipient) + + +def validate_comma_separated_emails(value): + """ + Validate every email address in a comma separated list of emails. + """ + if not isinstance(value, (tuple, list)): + raise ValidationError('Email list must be a list/tuple.') + + for email in value: + try: + validate_email_with_name(email) + except ValidationError: + raise ValidationError('Invalid email: %s' % email, code='invalid') + + +def validate_template_syntax(source): + """ + Basic Django Template syntax validation. This allows for robuster template + authoring. + """ + try: + Template(source) + except (TemplateSyntaxError, TemplateDoesNotExist) as err: + raise ValidationError(text_type(err)) diff --git a/thesisenv/lib/python3.6/site-packages/post_office/views.py b/thesisenv/lib/python3.6/site-packages/post_office/views.py new file mode 100644 index 0000000..60f00ef --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/post_office/views.py @@ -0,0 +1 @@ +# Create your views here. 
diff --git a/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/PKG-INFO new file mode 100644 index 0000000..46806cc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/PKG-INFO @@ -0,0 +1,20 @@ +Metadata-Version: 1.1 +Name: uWSGI +Version: 2.0.17.1 +Summary: The uWSGI server +Home-page: https://uwsgi-docs.readthedocs.io/en/latest/ +Author: Unbit +Author-email: info@unbit.it +License: GPLv2+ +Description: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 diff --git a/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..3d6004d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,6 @@ +README +uwsgidecorators.py +uWSGI.egg-info/PKG-INFO +uWSGI.egg-info/SOURCES.txt +uWSGI.egg-info/dependency_links.txt +uWSGI.egg-info/top_level.txt \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..ac3e2fb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/installed-files.txt @@ -0,0 +1,6 @@ +../__pycache__/uwsgidecorators.cpython-36.pyc +../uwsgidecorators.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/top_level.txt new file mode 100644 index 0000000..474f53a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uWSGI-2.0.17.1-py3.6.egg-info/top_level.txt @@ -0,0 +1 @@ +uwsgidecorators diff --git a/thesisenv/lib/python3.6/site-packages/uwsgidecorators.py b/thesisenv/lib/python3.6/site-packages/uwsgidecorators.py new file mode 100644 index 0000000..dd8e880 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/uwsgidecorators.py @@ -0,0 +1,419 @@ +from functools import partial +import sys +from threading import Thread + +try: + import cPickle as pickle +except: + import pickle + +import uwsgi + +if uwsgi.masterpid() == 0: + raise Exception( + "you have to enable the uWSGI master process to use this module") + +spooler_functions = {} +mule_functions = {} +postfork_chain = [] + + +# Python3 compatibility +def _encode1(val): + if sys.version_info >= (3, 0) and isinstance(val, str): + return 
val.encode('utf-8') + else: + return val + + +def _decode1(val): + if sys.version_info >= (3, 0) and isinstance(val, bytes): + return val.decode('utf-8') + else: + return val + + +def _encode_to_spooler(vars): + return dict((_encode1(K), _encode1(V)) for (K, V) in vars.items()) + + +def _decode_from_spooler(vars): + return dict((_decode1(K), _decode1(V)) for (K, V) in vars.items()) + + +def get_free_signal(): + for signum in range(0, 256): + if not uwsgi.signal_registered(signum): + return signum + + raise Exception("No free uwsgi signal available") + + +def manage_spool_request(vars): + # To check whether 'args' is in vars, decode the keys first, + # because in Python 3 all keys in 'vars' are bytes + vars = dict((_decode1(K), V) for (K, V) in vars.items()) + if 'args' in vars: + for k in ('args', 'kwargs'): + vars[k] = pickle.loads(vars.pop(k)) + + vars = _decode_from_spooler(vars) + f = spooler_functions[vars['ud_spool_func']] + + if 'args' in vars: + ret = f(*vars['args'], **vars['kwargs']) + else: + ret = f(vars) + + return int(vars.get('ud_spool_ret', ret)) + + +def postfork_chain_hook(): + for f in postfork_chain: + f() + +uwsgi.spooler = manage_spool_request +uwsgi.post_fork_hook = postfork_chain_hook + + +class postfork(object): + def __init__(self, f): + if callable(f): + self.wid = 0 + self.f = f + else: + self.f = None + self.wid = f + postfork_chain.append(self) + + def __call__(self, *args, **kwargs): + if self.f: + if self.wid > 0 and self.wid != uwsgi.worker_id(): + return + return self.f() + self.f = args[0] + + +class _spoolraw(object): + + def __call__(self, *args, **kwargs): + arguments = self.base_dict.copy() + if not self.pass_arguments: + if len(args) > 0: + arguments.update(args[0]) + if kwargs: + arguments.update(kwargs) + else: + spooler_args = {} + for key in ('message_dict', 'spooler', 'priority', 'at', 'body'): + if key in kwargs: + spooler_args.update({key: kwargs.pop(key)}) + arguments.update(spooler_args) + arguments.update( + {'args': pickle.dumps(args), 'kwargs': pickle.dumps(kwargs)}) + return uwsgi.spool(_encode_to_spooler(arguments)) + + # For backward compatibility (uWSGI < 1.9.13) + def spool(self, *args, **kwargs): + return self.__class__.__call__(self, *args, **kwargs) + + def __init__(self, f, pass_arguments): + if 'spooler' not in uwsgi.opt: + raise Exception( + "you have to enable the uWSGI spooler to use @%s decorator" % self.__class__.__name__) + self.f = f + spooler_functions[self.f.__name__] = self.f + # For backward compatibility (uWSGI < 1.9.13) + self.f.spool = self.__call__ + self.pass_arguments = pass_arguments + self.base_dict = {'ud_spool_func': self.f.__name__} + + +class _spool(_spoolraw): + + def __call__(self, *args, **kwargs): + self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_OK) + return _spoolraw.__call__(self, *args, **kwargs) + + +class _spoolforever(_spoolraw): + + def __call__(self, *args, **kwargs): + self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_RETRY) + return _spoolraw.__call__(self, *args, **kwargs) + + +def spool_decorate(f=None, pass_arguments=False, _class=_spoolraw): + if not f: + return partial(_class, pass_arguments=pass_arguments) + return _class(f, pass_arguments) + + +def spoolraw(f=None, pass_arguments=False): + return spool_decorate(f, pass_arguments) + + +def spool(f=None, pass_arguments=False): + return spool_decorate(f, pass_arguments, _spool) + + +def spoolforever(f=None, pass_arguments=False): + return spool_decorate(f, pass_arguments, _spoolforever) + + +class mulefunc(object): + +
def __init__(self, f): + if callable(f): + self.fname = f.__name__ + self.mule = 0 + mule_functions[f.__name__] = f + else: + self.mule = f + self.fname = None + + def real_call(self, *args, **kwargs): + uwsgi.mule_msg(pickle.dumps( + { + 'service': 'uwsgi_mulefunc', + 'func': self.fname, + 'args': args, + 'kwargs': kwargs + } + ), self.mule) + + def __call__(self, *args, **kwargs): + if not self.fname: + self.fname = args[0].__name__ + mule_functions[self.fname] = args[0] + return self.real_call + + return self.real_call(*args, **kwargs) + + +def mule_msg_dispatcher(message): + msg = pickle.loads(message) + if msg['service'] == 'uwsgi_mulefunc': + return mule_functions[msg['func']](*msg['args'], **msg['kwargs']) + +uwsgi.mule_msg_hook = mule_msg_dispatcher + + +class rpc(object): + + def __init__(self, name): + self.name = name + + def __call__(self, f): + uwsgi.register_rpc(self.name, f) + return f + + +class farm_loop(object): + + def __init__(self, f, farm): + self.f = f + self.farm = farm + + def __call__(self): + if uwsgi.mule_id() == 0: + return + if not uwsgi.in_farm(self.farm): + return + while True: + message = uwsgi.farm_get_msg() + if message: + self.f(message) + + +class farm(object): + + def __init__(self, name=None, **kwargs): + self.name = name + + def __call__(self, f): + postfork_chain.append(farm_loop(f, self.name)) + + +class mule_brain(object): + + def __init__(self, f, num): + self.f = f + self.num = num + + def __call__(self): + if uwsgi.mule_id() == self.num: + try: + self.f() + except: + exc = sys.exc_info() + sys.excepthook(exc[0], exc[1], exc[2]) + sys.exit(1) + + +class mule_brainloop(mule_brain): + + def __call__(self): + if uwsgi.mule_id() == self.num: + while True: + try: + self.f() + except: + exc = sys.exc_info() + sys.excepthook(exc[0], exc[1], exc[2]) + sys.exit(1) + + +class mule(object): + def __init__(self, num): + self.num = num + + def __call__(self, f): + postfork_chain.append(mule_brain(f, self.num)) + + +class muleloop(mule): + def __call__(self, f): + postfork_chain.append(mule_brainloop(f, self.num)) + + +class mulemsg_loop(object): + + def __init__(self, f, num): + self.f = f + self.num = num + + def __call__(self): + if uwsgi.mule_id() == self.num: + while True: + message = uwsgi.mule_get_msg() + if message: + self.f(message) + + +class mulemsg(object): + def __init__(self, num): + self.num = num + + def __call__(self, f): + postfork_chain.append(mulemsg_loop(f, self.num)) + + +class signal(object): + + def __init__(self, num, **kwargs): + self.num = num + self.target = kwargs.get('target', '') + + def __call__(self, f): + uwsgi.register_signal(self.num, self.target, f) + return f + + +class timer(object): + + def __init__(self, secs, **kwargs): + self.num = kwargs.get('signum', get_free_signal()) + self.secs = secs + self.target = kwargs.get('target', '') + + def __call__(self, f): + uwsgi.register_signal(self.num, self.target, f) + uwsgi.add_timer(self.num, self.secs) + return f + + +class cron(object): + + def __init__(self, minute, hour, day, month, dayweek, **kwargs): + self.num = kwargs.get('signum', get_free_signal()) + self.minute = minute + self.hour = hour + self.day = day + self.month = month + self.dayweek = dayweek + self.target = kwargs.get('target', '') + + def __call__(self, f): + uwsgi.register_signal(self.num, self.target, f) + uwsgi.add_cron(self.num, self.minute, self.hour, + self.day, self.month, self.dayweek) + return f + + +class rbtimer(object): + + def __init__(self, secs, **kwargs): + self.num = kwargs.get('signum', 
get_free_signal()) + self.secs = secs + self.target = kwargs.get('target', '') + + def __call__(self, f): + uwsgi.register_signal(self.num, self.target, f) + uwsgi.add_rb_timer(self.num, self.secs) + return f + + +class filemon(object): + + def __init__(self, fsobj, **kwargs): + self.num = kwargs.get('signum', get_free_signal()) + self.fsobj = fsobj + self.target = kwargs.get('target', '') + + def __call__(self, f): + uwsgi.register_signal(self.num, self.target, f) + uwsgi.add_file_monitor(self.num, self.fsobj) + return f + + +class erlang(object): + + def __init__(self, name): + self.name = name + + def __call__(self, f): + uwsgi.erlang_register_process(self.name, f) + return f + + +class lock(object): + def __init__(self, f): + self.f = f + + def __call__(self, *args, **kwargs): + # ensure the spooler will not call it + if uwsgi.i_am_the_spooler(): + return + uwsgi.lock() + try: + return self.f(*args, **kwargs) + finally: + uwsgi.unlock() + + +class thread(object): + + def __init__(self, f): + self.f = f + + def __call__(self, *args): + t = Thread(target=self.f, args=args) + t.daemon = True + t.start() + return self.f + + +class harakiri(object): + + def __init__(self, seconds): + self.s = seconds + + def real_call(self, *args, **kwargs): + uwsgi.set_user_harakiri(self.s) + r = self.f(*args, **kwargs) + uwsgi.set_user_harakiri(0) + return r + + def __call__(self, f): + self.f = f + return self.real_call
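uwsgidecorators.py above wires plain Python callables into uWSGI's postfork, spooler, and signal machinery. A minimal sketch of how these decorators are typically applied, assuming uWSGI runs with a master process (enforced by the masterpid() check at the top of the module) and, for @spool, a configured spooler directory; the function names and bodies here are illustrative only:

    import uwsgidecorators

    @uwsgidecorators.postfork
    def reopen_connections():
        # Runs once in every worker right after fork (via postfork_chain_hook).
        pass

    @uwsgidecorators.spool(pass_arguments=True)
    def heavy_job(a, b):
        # Runs in the spooler; args/kwargs are pickled by _spoolraw.__call__
        # and unpickled again in manage_spool_request.
        pass

    @uwsgidecorators.timer(60)
    def every_minute(signum):
        # Registered through uwsgi.register_signal() and uwsgi.add_timer();
        # signal handlers receive the signal number.
        pass

    heavy_job(1, 2)  # enqueues a spool request instead of running inline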